block: remove REQ_NOWAIT_INLINE
We had a few issues with this code, and there's still a problem around how we deal with error handling for chained/split bios. For now, just revert the code and we'll try again with a thorough solution.

This reverts the following commits:

e15c2ffa10 ("block: fix O_DIRECT error handling for bio fragments")
0eb6ddfb86 ("block: Fix __blkdev_direct_IO() for bio fragments")
6a43074e2f ("block: properly handle IOCB_NOWAIT for async O_DIRECT IO")
893a1c9720 ("blk-mq: allow REQ_NOWAIT to return an error inline")

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 99c79f6692
commit 7b6620d7db
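What this means for callers: the REQ_NOWAIT semantics are unchanged at the syscall boundary. A nowait direct I/O that cannot proceed still fails with -EAGAIN; what is being reverted is only the in-kernel delivery mechanism (an inline submission cookie instead of the bio completion callback). A minimal userspace sketch of those semantics, assuming a readable block device at /dev/sdX (hypothetical path) and a kernel with RWF_NOWAIT support (v4.13+):

/* Sketch: nowait direct read; EAGAIN means "would have blocked, try again". */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/sdX", O_RDONLY | O_DIRECT);	/* hypothetical device */
	if (fd < 0) { perror("open"); return 1; }

	void *buf;
	if (posix_memalign(&buf, 4096, 4096)) { perror("posix_memalign"); return 1; }

	struct iovec iov = { .iov_base = buf, .iov_len = 4096 };
	ssize_t ret = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);
	if (ret < 0 && errno == EAGAIN)
		puts("would block: fall back to a blocking submission");
	else if (ret < 0)
		perror("preadv2");
	else
		printf("read %zd bytes without blocking\n", ret);

	free(buf);
	close(fd);
	return 0;
}

For a synchronous syscall like this, the -EAGAIN comes back as the return value either way; the reverted commits were about handing it to async submitters inline at submission time rather than through bio->bi_end_io().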
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1958,13 +1958,9 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 	rq = blk_mq_get_request(q, bio, &data);
 	if (unlikely(!rq)) {
 		rq_qos_cleanup(q, bio);
-
-		cookie = BLK_QC_T_NONE;
-		if (bio->bi_opf & REQ_NOWAIT_INLINE)
-			cookie = BLK_QC_T_EAGAIN;
-		else if (bio->bi_opf & REQ_NOWAIT)
+		if (bio->bi_opf & REQ_NOWAIT)
 			bio_wouldblock_error(bio);
-		return cookie;
+		return BLK_QC_T_NONE;
 	}
 
 	trace_block_getrq(q, bio, bio->bi_opf);
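After this hunk, the nowait failure path is back to bio_wouldblock_error() plus a plain BLK_QC_T_NONE cookie. For reference, that helper (as found in include/linux/bio.h in this era; shown from memory, so verify against your tree) simply completes the bio with BLK_STS_AGAIN, which is how the submitter learns of the failure through its ->bi_end_io() callback:

/* include/linux/bio.h (sketch): fail a REQ_NOWAIT bio through the normal
 * completion path rather than returning an inline error cookie. */
static inline void bio_wouldblock_error(struct bio *bio)
{
	bio->bi_status = BLK_STS_AGAIN;
	bio_endio(bio);
}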
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -345,24 +345,15 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	struct bio *bio;
 	bool is_poll = (iocb->ki_flags & IOCB_HIPRI) != 0;
 	bool is_read = (iov_iter_rw(iter) == READ), is_sync;
-	bool nowait = (iocb->ki_flags & IOCB_NOWAIT) != 0;
 	loff_t pos = iocb->ki_pos;
 	blk_qc_t qc = BLK_QC_T_NONE;
-	gfp_t gfp;
-	int ret;
+	int ret = 0;
 
 	if ((pos | iov_iter_alignment(iter)) &
 	    (bdev_logical_block_size(bdev) - 1))
 		return -EINVAL;
 
-	if (nowait)
-		gfp = GFP_NOWAIT;
-	else
-		gfp = GFP_KERNEL;
-
-	bio = bio_alloc_bioset(gfp, nr_pages, &blkdev_dio_pool);
-	if (!bio)
-		return -EAGAIN;
+	bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, &blkdev_dio_pool);
 
 	dio = container_of(bio, struct blkdev_dio, bio);
 	dio->is_sync = is_sync = is_sync_kiocb(iocb);
@@ -384,7 +375,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 	if (!is_poll)
 		blk_start_plug(&plug);
 
-	ret = 0;
 	for (;;) {
 		bio_set_dev(bio, bdev);
 		bio->bi_iter.bi_sector = pos >> 9;
@@ -409,14 +399,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			task_io_account_write(bio->bi_iter.bi_size);
 		}
 
-		/*
-		 * Tell underlying layer to not block for resource shortage.
-		 * And if we would have blocked, return error inline instead
-		 * of through the bio->bi_end_io() callback.
-		 */
-		if (nowait)
-			bio->bi_opf |= (REQ_NOWAIT | REQ_NOWAIT_INLINE);
-
+		dio->size += bio->bi_iter.bi_size;
 		pos += bio->bi_iter.bi_size;
 
 		nr_pages = iov_iter_npages(iter, BIO_MAX_PAGES);
@@ -428,13 +411,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 				polled = true;
 			}
 
-			dio->size += bio->bi_iter.bi_size;
 			qc = submit_bio(bio);
-			if (qc == BLK_QC_T_EAGAIN) {
-				dio->size -= bio->bi_iter.bi_size;
-				ret = -EAGAIN;
-				goto error;
-			}
 
 			if (polled)
 				WRITE_ONCE(iocb->ki_cookie, qc);
@@ -455,19 +432,8 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 			atomic_inc(&dio->ref);
 		}
 
-		dio->size += bio->bi_iter.bi_size;
-		qc = submit_bio(bio);
-		if (qc == BLK_QC_T_EAGAIN) {
-			dio->size -= bio->bi_iter.bi_size;
-			ret = -EAGAIN;
-			goto error;
-		}
-
-		bio = bio_alloc(gfp, nr_pages);
-		if (!bio) {
-			ret = -EAGAIN;
-			goto error;
-		}
+		submit_bio(bio);
+		bio = bio_alloc(GFP_KERNEL, nr_pages);
 	}
 
 	if (!is_poll)
@@ -487,7 +453,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 		}
 		__set_current_state(TASK_RUNNING);
 
-out:
 	if (!ret)
 		ret = blk_status_to_errno(dio->bio.bi_status);
 	if (likely(!ret))
@@ -495,10 +460,6 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
 
 	bio_put(&dio->bio);
 	return ret;
-error:
-	if (!is_poll)
-		blk_finish_plug(&plug);
-	goto out;
 }
 
 static ssize_t
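The chained/split-bio problem mentioned in the commit message comes from the multi-bio accounting visible above: each extra bio takes a reference on the shared blkdev_dio, and dio->size is the running total that becomes the return value on success. The reverted code subtracted the failed fragment's size after submit_bio() returned BLK_QC_T_EAGAIN, but by then earlier fragments may already be completing on another CPU and reading dio->size, which appears to be the kind of hazard the message alludes to. A loose standalone C model of the reference pattern (illustrative only, not kernel code; names mirror the diff):

#include <stdatomic.h>
#include <stdio.h>

/* Toy model of struct blkdev_dio: one shared completion tracked by refcount. */
struct dio {
	atomic_int ref;		/* one per in-flight bio fragment, plus submitter */
	_Atomic long size;	/* bytes submitted so far */
};

/* Completion side: the last reference to drop reports the total. */
static void bio_done(struct dio *dio)
{
	if (atomic_fetch_sub(&dio->ref, 1) == 1)
		printf("dio complete: %ld bytes\n", atomic_load(&dio->size));
}

static void submit_fragment(struct dio *dio, long bytes)
{
	atomic_fetch_add(&dio->ref, 1);
	atomic_fetch_add(&dio->size, bytes);
	/* submit_bio() would go here; completion may run at any time after. */
	bio_done(dio);	/* simulate immediate completion */
}

int main(void)
{
	struct dio dio;
	atomic_init(&dio.ref, 1);	/* initial ref held by the submitter */
	atomic_init(&dio.size, 0);
	submit_fragment(&dio, 4096);
	submit_fragment(&dio, 4096);
	bio_done(&dio);			/* drop the submitter's ref */
	return 0;
}

Once any fragment has been submitted, a concurrent completion can observe dio->size, so rolling the count back after a later failure cannot be done safely without more coordination; hence the revert in favor of a rethought design.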
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -311,7 +311,6 @@ enum req_flag_bits {
 	__REQ_RAHEAD,		/* read ahead, can fail anytime */
 	__REQ_BACKGROUND,	/* background IO */
 	__REQ_NOWAIT,		/* Don't wait if request will block */
-	__REQ_NOWAIT_INLINE,	/* Return would-block error inline */
 	/*
 	 * When a shared kthread needs to issue a bio for a cgroup, doing
 	 * so synchronously can lead to priority inversions as the kthread
@@ -346,7 +345,6 @@ enum req_flag_bits {
 #define REQ_RAHEAD		(1ULL << __REQ_RAHEAD)
 #define REQ_BACKGROUND		(1ULL << __REQ_BACKGROUND)
 #define REQ_NOWAIT		(1ULL << __REQ_NOWAIT)
-#define REQ_NOWAIT_INLINE	(1ULL << __REQ_NOWAIT_INLINE)
 #define REQ_CGROUP_PUNT		(1ULL << __REQ_CGROUP_PUNT)
 
 #define REQ_NOUNMAP		(1ULL << __REQ_NOUNMAP)
@@ -420,13 +418,12 @@ static inline int op_stat_group(unsigned int op)
 
 typedef unsigned int blk_qc_t;
 #define BLK_QC_T_NONE		-1U
-#define BLK_QC_T_EAGAIN		-2U
 #define BLK_QC_T_SHIFT		16
 #define BLK_QC_T_INTERNAL	(1U << 31)
 
 static inline bool blk_qc_t_valid(blk_qc_t cookie)
 {
-	return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN;
+	return cookie != BLK_QC_T_NONE;
 }
 
 static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
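The blk_qc_t changes are self-contained enough to model in isolation: the cookie packs a hardware queue number above BLK_QC_T_SHIFT, and BLK_QC_T_EAGAIN had carved a second sentinel (-2U) out of the cookie space, which is why blk_qc_t_valid() needed the extra comparison the revert removes. A small standalone program mirroring the macros above (illustrative, compiled outside the kernel; the queue/tag layout shown matches this era of the tree as best I recall):

#include <stdio.h>

typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE		(-1U)
#define BLK_QC_T_SHIFT		16
#define BLK_QC_T_INTERNAL	(1U << 31)

/* Post-revert: a single sentinel, so one comparison suffices. */
static inline int blk_qc_t_valid(blk_qc_t cookie)
{
	return cookie != BLK_QC_T_NONE;
}

/* The queue number lives in the high bits, below the "internal tag" flag. */
static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie)
{
	return (cookie & ~BLK_QC_T_INTERNAL) >> BLK_QC_T_SHIFT;
}

int main(void)
{
	blk_qc_t qc = (3U << BLK_QC_T_SHIFT) | 42;	/* queue 3, tag 42 */
	printf("valid=%d queue=%u\n", blk_qc_t_valid(qc), blk_qc_t_to_queue_num(qc));
	printf("NONE valid=%d\n", blk_qc_t_valid(BLK_QC_T_NONE));
	return 0;
}

Dropping the -2U sentinel restores the invariant that every cookie other than BLK_QC_T_NONE is a real submission cookie, which keeps polling paths that test blk_qc_t_valid() simple.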