mirror of https://gitee.com/openkylin/linux.git
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
 "Three small fixes for 4.4 final. Specifically:

   - The segment issue fix from Junichi, where the old IO path does a
     bio limit split before potentially bouncing the pages. We need to
     do that in the right order, to ensure that limitations are met.

   - A NVMe surprise removal IO hang fix from Keith.

   - A use-after-free in null_blk, introduced by a previous patch in
     this series. From Mike Krinkin"

* 'for-linus' of git://git.kernel.dk/linux-block:
  null_blk: fix use-after-free error
  block: ensure to split after potentially bouncing a bio
  NVMe: IO ending fixes on surprise removal
commit 24bc3ea5df
@@ -1689,8 +1689,6 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	struct request *req;
 	unsigned int request_count = 0;
 
-	blk_queue_split(q, &bio, q->bio_split);
-
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
@@ -1698,6 +1696,8 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);
 
+	blk_queue_split(q, &bio, q->bio_split);
+
 	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
 		bio->bi_error = -EIO;
 		bio_endio(bio);
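Why the order matters: blk_queue_bounce() may substitute the pages (and hence the bio) the driver finally sees, so computing limit-based splits on the pre-bounce bio can hand the driver something that was never checked against its limits. Below is a minimal userspace sketch of the ordering principle only; bounce(), split(), and LIMIT are hypothetical stand-ins, and bouncing is modeled crudely here as the unit growing, which is not what blk_queue_bounce() literally does.

/* build: cc -std=c99 -o order order.c */
#include <stdio.h>

#define LIMIT 8	/* stand-in for a queue's per-piece size limit */

/* Stand-in for blk_queue_bounce(): the post-bounce unit may differ
 * from what was inspected earlier; modeled as growing by two bytes. */
static size_t bounce(size_t len)
{
	return len + 2;
}

/* Stand-in for blk_queue_split(): how many LIMIT-sized pieces a unit
 * of 'len' bytes must be cut into. */
static size_t split(size_t len)
{
	return (len + LIMIT - 1) / LIMIT;
}

int main(void)
{
	size_t len = 7;

	/* Wrong order: split() sees 7 <= LIMIT and emits one piece,
	 * then bounce() grows it to 9 bytes, breaking the limit. */
	printf("split-then-bounce: %zu piece(s), final size %zu\n",
	       split(len), bounce(len));

	/* Fixed order (what the patch does): bounce first, then split,
	 * so every emitted piece respects LIMIT. */
	printf("bounce-then-split: %zu piece(s)\n", split(bounce(len)));
	return 0;
}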
@@ -219,6 +219,9 @@ static void end_cmd(struct nullb_cmd *cmd)
 {
 	struct request_queue *q = NULL;
 
+	if (cmd->rq)
+		q = cmd->rq->q;
+
 	switch (queue_mode) {
 	case NULL_Q_MQ:
 		blk_mq_end_request(cmd->rq, 0);
@@ -232,9 +235,6 @@ static void end_cmd(struct nullb_cmd *cmd)
 		goto free_cmd;
 	}
 
-	if (cmd->rq)
-		q = cmd->rq->q;
-
 	/* Restart queue if needed, as we are freeing a tag */
 	if (q && !q->mq_ops && blk_queue_stopped(q)) {
 		unsigned long flags;
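The null_blk bug is a classic read-after-free: blk_mq_end_request() can free cmd->rq, and the old code dereferenced cmd->rq->q afterwards to decide whether to restart the queue. The fix caches the queue pointer before completing the request. A minimal userspace sketch of the same pattern, with hypothetical stand-in types and names (struct queue, struct request, end_request()) rather than the driver's real ones:

/* build: cc -std=c99 -o uaf uaf.c */
#include <stdlib.h>
#include <stdio.h>

struct queue { int stopped; };
struct request {
	struct queue *q;
	char payload[64];
};

/* Stand-in for blk_mq_end_request(): completion may free the request. */
static void end_request(struct request *rq)
{
	free(rq);
}

static void end_cmd(struct request *rq)
{
	/* The fix: grab everything we need from rq *before* the call
	 * that can free it. The buggy version read rq->q afterwards. */
	struct queue *q = rq ? rq->q : NULL;

	end_request(rq);

	if (q && q->stopped)
		printf("restarting queue\n");
}

int main(void)
{
	struct queue q = { .stopped = 1 };
	struct request *rq = malloc(sizeof(*rq));

	if (!rq)
		return 1;
	rq->q = &q;
	end_cmd(rq);
	return 0;
}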
@@ -2540,8 +2540,17 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 {
 	bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
 
-	if (kill)
+	if (kill) {
 		blk_set_queue_dying(ns->queue);
+
+		/*
+		 * The controller was shutdown first if we got here through
+		 * device removal. The shutdown may requeue outstanding
+		 * requests. These need to be aborted immediately so
+		 * del_gendisk doesn't block indefinitely for their completion.
+		 */
+		blk_mq_abort_requeue_list(ns->queue);
+	}
 	if (ns->disk->flags & GENHD_FL_UP)
 		del_gendisk(ns->disk);
 	if (kill || !blk_queue_dying(ns->queue)) {
@@ -2977,6 +2986,15 @@ static void nvme_dev_remove(struct nvme_dev *dev)
 {
 	struct nvme_ns *ns, *next;
 
+	if (nvme_io_incapable(dev)) {
+		/*
+		 * If the device is not capable of IO (surprise hot-removal,
+		 * for example), we need to quiesce prior to deleting the
+		 * namespaces. This will end outstanding requests and prevent
+		 * attempts to sync dirty data.
+		 */
+		nvme_dev_shutdown(dev);
+	}
 	list_for_each_entry_safe(ns, next, &dev->namespaces, list)
 		nvme_ns_remove(ns);
 }
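The NVMe hang has the same shape as any teardown that blocks on completions which can no longer arrive: requests parked for requeue will never finish once the device is gone, so del_gendisk() would wait forever. Ending the parked requests with an error first lets the blocking teardown proceed. A minimal sketch of that shape, with hypothetical stand-ins (pending[], abort_pending(), wait_for_idle()) rather than the kernel's APIs:

/* build: cc -std=c99 -o drain drain.c */
#include <stdio.h>

#define NPENDING 3

/* Stand-in for a requeue list: requests parked for a retry that will
 * never happen once the device is gone. */
static int pending[NPENDING] = { 1, 1, 1 };

/* Stand-in for blk_mq_abort_requeue_list(): complete each parked
 * request with an error instead of waiting for the hardware. */
static void abort_pending(void)
{
	for (int i = 0; i < NPENDING; i++) {
		if (pending[i]) {
			pending[i] = 0;
			printf("request %d ended with -EIO\n", i);
		}
	}
}

/* Stand-in for del_gendisk(): blocks until nothing is outstanding. */
static void wait_for_idle(void)
{
	for (int i = 0; i < NPENDING; i++)
		while (pending[i])
			; /* would spin forever if nothing aborts them */
	printf("disk removed\n");
}

int main(void)
{
	int device_gone = 1;	/* surprise removal detected */

	/* The fix's shape: if the device can no longer do IO, end the
	 * parked requests first so the blocking teardown can finish. */
	if (device_gone)
		abort_pending();
	wait_for_idle();
	return 0;
}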