block: replace bi_bdev with a gendisk pointer and partitions index

This way we don't need a block_device structure to submit I/O.  The
block_device has different life time rules from the gendisk and
request_queue and is usually only available when the block device node
is open.  Other callers need to explicitly create one (e.g. the lightnvm
passthrough code, or the new nvme multipathing code).

For the actual I/O path all that we need is the gendisk, which exists
once per block device.  But given that the block layer also does
partition remapping we additionally need a partition index, which is
used for said remapping in generic_make_request.

Note that all the block drivers generally want request_queue or
sometimes the gendisk, so this removes a layer of indirection all
over the stack.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
This commit is contained in:
Christoph Hellwig 2017-08-23 19:10:32 +02:00 committed by Jens Axboe
parent c2ee070fb0
commit 74d46992e0
99 changed files with 358 additions and 357 deletions

View File

@ -110,7 +110,7 @@ axon_ram_irq_handler(int irq, void *dev)
static blk_qc_t static blk_qc_t
axon_ram_make_request(struct request_queue *queue, struct bio *bio) axon_ram_make_request(struct request_queue *queue, struct bio *bio)
{ {
struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; struct axon_ram_bank *bank = bio->bi_disk->private_data;
unsigned long phys_mem, phys_end; unsigned long phys_mem, phys_end;
void *user_mem; void *user_mem;
struct bio_vec vec; struct bio_vec vec;

View File

@ -146,7 +146,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
iv = bip->bip_vec + bip->bip_vcnt; iv = bip->bip_vec + bip->bip_vcnt;
if (bip->bip_vcnt && if (bip->bip_vcnt &&
bvec_gap_to_prev(bdev_get_queue(bio->bi_bdev), bvec_gap_to_prev(bio->bi_disk->queue,
&bip->bip_vec[bip->bip_vcnt - 1], offset)) &bip->bip_vec[bip->bip_vcnt - 1], offset))
return 0; return 0;
@ -190,7 +190,7 @@ static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
static blk_status_t bio_integrity_process(struct bio *bio, static blk_status_t bio_integrity_process(struct bio *bio,
struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn) struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
{ {
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct blk_integrity_iter iter; struct blk_integrity_iter iter;
struct bvec_iter bviter; struct bvec_iter bviter;
struct bio_vec bv; struct bio_vec bv;
@ -199,7 +199,7 @@ static blk_status_t bio_integrity_process(struct bio *bio,
void *prot_buf = page_address(bip->bip_vec->bv_page) + void *prot_buf = page_address(bip->bip_vec->bv_page) +
bip->bip_vec->bv_offset; bip->bip_vec->bv_offset;
iter.disk_name = bio->bi_bdev->bd_disk->disk_name; iter.disk_name = bio->bi_disk->disk_name;
iter.interval = 1 << bi->interval_exp; iter.interval = 1 << bi->interval_exp;
iter.seed = proc_iter->bi_sector; iter.seed = proc_iter->bi_sector;
iter.prot_buf = prot_buf; iter.prot_buf = prot_buf;
@ -236,8 +236,8 @@ static blk_status_t bio_integrity_process(struct bio *bio,
bool bio_integrity_prep(struct bio *bio) bool bio_integrity_prep(struct bio *bio)
{ {
struct bio_integrity_payload *bip; struct bio_integrity_payload *bip;
struct blk_integrity *bi; struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct request_queue *q; struct request_queue *q = bio->bi_disk->queue;
void *buf; void *buf;
unsigned long start, end; unsigned long start, end;
unsigned int len, nr_pages; unsigned int len, nr_pages;
@ -245,11 +245,9 @@ bool bio_integrity_prep(struct bio *bio)
unsigned int intervals; unsigned int intervals;
blk_status_t status; blk_status_t status;
bi = bdev_get_integrity(bio->bi_bdev);
if (!bi) if (!bi)
return true; return true;
q = bdev_get_queue(bio->bi_bdev);
if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE) if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
return true; return true;
@ -354,7 +352,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
struct bio_integrity_payload *bip = struct bio_integrity_payload *bip =
container_of(work, struct bio_integrity_payload, bip_work); container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio; struct bio *bio = bip->bip_bio;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
struct bvec_iter iter = bio->bi_iter; struct bvec_iter iter = bio->bi_iter;
/* /*
@ -411,7 +409,7 @@ bool __bio_integrity_endio(struct bio *bio)
void bio_integrity_advance(struct bio *bio, unsigned int bytes_done) void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
{ {
struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_integrity_payload *bip = bio_integrity(bio);
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9); unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
bip->bip_iter.bi_sector += bytes_done >> 9; bip->bip_iter.bi_sector += bytes_done >> 9;
@ -428,7 +426,7 @@ EXPORT_SYMBOL(bio_integrity_advance);
void bio_integrity_trim(struct bio *bio) void bio_integrity_trim(struct bio *bio)
{ {
struct bio_integrity_payload *bip = bio_integrity(bio); struct bio_integrity_payload *bip = bio_integrity(bio);
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio)); bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
} }

View File

@ -593,10 +593,10 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio)); BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
/* /*
* most users will be overriding ->bi_bdev with a new target, * most users will be overriding ->bi_disk with a new target,
* so we don't set nor calculate new physical/hw segment counts here * so we don't set nor calculate new physical/hw segment counts here
*/ */
bio->bi_bdev = bio_src->bi_bdev; bio->bi_disk = bio_src->bi_disk;
bio_set_flag(bio, BIO_CLONED); bio_set_flag(bio, BIO_CLONED);
bio->bi_opf = bio_src->bi_opf; bio->bi_opf = bio_src->bi_opf;
bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_write_hint = bio_src->bi_write_hint;
@ -681,7 +681,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs); bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
if (!bio) if (!bio)
return NULL; return NULL;
bio->bi_bdev = bio_src->bi_bdev; bio->bi_disk = bio_src->bi_disk;
bio->bi_opf = bio_src->bi_opf; bio->bi_opf = bio_src->bi_opf;
bio->bi_write_hint = bio_src->bi_write_hint; bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector; bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
@ -1830,8 +1830,8 @@ void bio_endio(struct bio *bio)
goto again; goto again;
} }
if (bio->bi_bdev && bio_flagged(bio, BIO_TRACE_COMPLETION)) { if (bio->bi_disk && bio_flagged(bio, BIO_TRACE_COMPLETION)) {
trace_block_bio_complete(bdev_get_queue(bio->bi_bdev), bio, trace_block_bio_complete(bio->bi_disk->queue, bio,
blk_status_to_errno(bio->bi_status)); blk_status_to_errno(bio->bi_status));
bio_clear_flag(bio, BIO_TRACE_COMPLETION); bio_clear_flag(bio, BIO_TRACE_COMPLETION);
} }

View File

@ -1910,40 +1910,15 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
return BLK_QC_T_NONE; return BLK_QC_T_NONE;
} }
/*
 * If bio->bi_bdev is a partition, remap the location
*/
static inline void blk_partition_remap(struct bio *bio)
{
struct block_device *bdev = bio->bi_bdev;
/*
* Zone reset does not include bi_size so bio_sectors() is always 0.
* Include a test for the reset op code and perform the remap if needed.
*/
if (bdev != bdev->bd_contains &&
(bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)) {
struct hd_struct *p = bdev->bd_part;
bio->bi_iter.bi_sector += p->start_sect;
bio->bi_bdev = bdev->bd_contains;
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
bdev->bd_dev,
bio->bi_iter.bi_sector - p->start_sect);
}
}
static void handle_bad_sector(struct bio *bio) static void handle_bad_sector(struct bio *bio)
{ {
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
printk(KERN_INFO "attempt to access beyond end of device\n"); printk(KERN_INFO "attempt to access beyond end of device\n");
printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n", printk(KERN_INFO "%s: rw=%d, want=%Lu, limit=%Lu\n",
bdevname(bio->bi_bdev, b), bio_devname(bio, b), bio->bi_opf,
bio->bi_opf,
(unsigned long long)bio_end_sector(bio), (unsigned long long)bio_end_sector(bio),
(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9)); (long long)get_capacity(bio->bi_disk));
} }
#ifdef CONFIG_FAIL_MAKE_REQUEST #ifdef CONFIG_FAIL_MAKE_REQUEST
@ -1981,6 +1956,38 @@ static inline bool should_fail_request(struct hd_struct *part,
#endif /* CONFIG_FAIL_MAKE_REQUEST */ #endif /* CONFIG_FAIL_MAKE_REQUEST */
/*
* Remap block n of partition p to block n+start(p) of the disk.
*/
static inline int blk_partition_remap(struct bio *bio)
{
struct hd_struct *p;
int ret = 0;
/*
* Zone reset does not include bi_size so bio_sectors() is always 0.
* Include a test for the reset op code and perform the remap if needed.
*/
if (!bio->bi_partno ||
(!bio_sectors(bio) && bio_op(bio) != REQ_OP_ZONE_RESET))
return 0;
rcu_read_lock();
p = __disk_get_part(bio->bi_disk, bio->bi_partno);
if (likely(p && !should_fail_request(p, bio->bi_iter.bi_size))) {
bio->bi_iter.bi_sector += p->start_sect;
bio->bi_partno = 0;
trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
bio->bi_iter.bi_sector - p->start_sect);
} else {
printk("%s: fail for partition %d\n", __func__, bio->bi_partno);
ret = -EIO;
}
rcu_read_unlock();
return ret;
}
/* /*
* Check whether this bio extends beyond the end of the device. * Check whether this bio extends beyond the end of the device.
*/ */
@ -1992,7 +1999,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
return 0; return 0;
/* Test device or partition size, when known. */ /* Test device or partition size, when known. */
maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; maxsector = get_capacity(bio->bi_disk);
if (maxsector) { if (maxsector) {
sector_t sector = bio->bi_iter.bi_sector; sector_t sector = bio->bi_iter.bi_sector;
@ -2017,20 +2024,18 @@ generic_make_request_checks(struct bio *bio)
int nr_sectors = bio_sectors(bio); int nr_sectors = bio_sectors(bio);
blk_status_t status = BLK_STS_IOERR; blk_status_t status = BLK_STS_IOERR;
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
struct hd_struct *part;
might_sleep(); might_sleep();
if (bio_check_eod(bio, nr_sectors)) if (bio_check_eod(bio, nr_sectors))
goto end_io; goto end_io;
q = bdev_get_queue(bio->bi_bdev); q = bio->bi_disk->queue;
if (unlikely(!q)) { if (unlikely(!q)) {
printk(KERN_ERR printk(KERN_ERR
"generic_make_request: Trying to access " "generic_make_request: Trying to access "
"nonexistent block-device %s (%Lu)\n", "nonexistent block-device %s (%Lu)\n",
bdevname(bio->bi_bdev, b), bio_devname(bio, b), (long long)bio->bi_iter.bi_sector);
(long long) bio->bi_iter.bi_sector);
goto end_io; goto end_io;
} }
@ -2042,17 +2047,11 @@ generic_make_request_checks(struct bio *bio)
if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q)) if ((bio->bi_opf & REQ_NOWAIT) && !queue_is_rq_based(q))
goto not_supported; goto not_supported;
part = bio->bi_bdev->bd_part; if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
if (should_fail_request(part, bio->bi_iter.bi_size) ||
should_fail_request(&part_to_disk(part)->part0,
bio->bi_iter.bi_size))
goto end_io; goto end_io;
/* if (blk_partition_remap(bio))
* If this device has partitions, remap block n goto end_io;
* of partition p to block n+start(p) of the disk.
*/
blk_partition_remap(bio);
if (bio_check_eod(bio, nr_sectors)) if (bio_check_eod(bio, nr_sectors))
goto end_io; goto end_io;
@ -2081,16 +2080,16 @@ generic_make_request_checks(struct bio *bio)
goto not_supported; goto not_supported;
break; break;
case REQ_OP_WRITE_SAME: case REQ_OP_WRITE_SAME:
if (!bdev_write_same(bio->bi_bdev)) if (!q->limits.max_write_same_sectors)
goto not_supported; goto not_supported;
break; break;
case REQ_OP_ZONE_REPORT: case REQ_OP_ZONE_REPORT:
case REQ_OP_ZONE_RESET: case REQ_OP_ZONE_RESET:
if (!bdev_is_zoned(bio->bi_bdev)) if (!blk_queue_is_zoned(q))
goto not_supported; goto not_supported;
break; break;
case REQ_OP_WRITE_ZEROES: case REQ_OP_WRITE_ZEROES:
if (!bdev_write_zeroes_sectors(bio->bi_bdev)) if (!q->limits.max_write_zeroes_sectors)
goto not_supported; goto not_supported;
break; break;
default: default:
@ -2197,7 +2196,7 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&bio_list_on_stack[0]); bio_list_init(&bio_list_on_stack[0]);
current->bio_list = bio_list_on_stack; current->bio_list = bio_list_on_stack;
do { do {
struct request_queue *q = bdev_get_queue(bio->bi_bdev); struct request_queue *q = bio->bi_disk->queue;
if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) { if (likely(blk_queue_enter(q, bio->bi_opf & REQ_NOWAIT) == 0)) {
struct bio_list lower, same; struct bio_list lower, same;
@ -2215,7 +2214,7 @@ blk_qc_t generic_make_request(struct bio *bio)
bio_list_init(&lower); bio_list_init(&lower);
bio_list_init(&same); bio_list_init(&same);
while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL) while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
if (q == bdev_get_queue(bio->bi_bdev)) if (q == bio->bi_disk->queue)
bio_list_add(&same, bio); bio_list_add(&same, bio);
else else
bio_list_add(&lower, bio); bio_list_add(&lower, bio);
@ -2258,7 +2257,7 @@ blk_qc_t submit_bio(struct bio *bio)
unsigned int count; unsigned int count;
if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME)) if (unlikely(bio_op(bio) == REQ_OP_WRITE_SAME))
count = bdev_logical_block_size(bio->bi_bdev) >> 9; count = queue_logical_block_size(bio->bi_disk->queue) >> 9;
else else
count = bio_sectors(bio); count = bio_sectors(bio);
@ -2275,8 +2274,7 @@ blk_qc_t submit_bio(struct bio *bio)
current->comm, task_pid_nr(current), current->comm, task_pid_nr(current),
op_is_write(bio_op(bio)) ? "WRITE" : "READ", op_is_write(bio_op(bio)) ? "WRITE" : "READ",
(unsigned long long)bio->bi_iter.bi_sector, (unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b), bio_devname(bio, b), count);
count);
} }
} }
@ -3049,8 +3047,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
rq->__data_len = bio->bi_iter.bi_size; rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio; rq->bio = rq->biotail = bio;
if (bio->bi_bdev) if (bio->bi_disk)
rq->rq_disk = bio->bi_bdev->bd_disk; rq->rq_disk = bio->bi_disk;
} }
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE

View File

@ -525,7 +525,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
return -ENXIO; return -ENXIO;
bio = bio_alloc(gfp_mask, 0); bio = bio_alloc(gfp_mask, 0);
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
ret = submit_bio_wait(bio); ret = submit_bio_wait(bio);

View File

@ -77,7 +77,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bio = next_bio(bio, 0, gfp_mask); bio = next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, op, 0); bio_set_op_attrs(bio, op, 0);
bio->bi_iter.bi_size = req_sects << 9; bio->bi_iter.bi_size = req_sects << 9;
@ -168,7 +168,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
while (nr_sects) { while (nr_sects) {
bio = next_bio(bio, 1, gfp_mask); bio = next_bio(bio, 1, gfp_mask);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_vcnt = 1; bio->bi_vcnt = 1;
bio->bi_io_vec->bv_page = page; bio->bi_io_vec->bv_page = page;
bio->bi_io_vec->bv_offset = 0; bio->bi_io_vec->bv_offset = 0;
@ -241,7 +241,7 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
while (nr_sects) { while (nr_sects) {
bio = next_bio(bio, 0, gfp_mask); bio = next_bio(bio, 0, gfp_mask);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_opf = REQ_OP_WRITE_ZEROES; bio->bi_opf = REQ_OP_WRITE_ZEROES;
if (flags & BLKDEV_ZERO_NOUNMAP) if (flags & BLKDEV_ZERO_NOUNMAP)
bio->bi_opf |= REQ_NOUNMAP; bio->bi_opf |= REQ_NOUNMAP;
@ -323,7 +323,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects), bio = next_bio(bio, __blkdev_sectors_to_bio_pages(nr_sects),
gfp_mask); gfp_mask);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
while (nr_sects != 0) { while (nr_sects != 0) {

View File

@ -786,7 +786,7 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
return false; return false;
/* must be same device and not a special request */ /* must be same device and not a special request */
if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq)) if (rq->rq_disk != bio->bi_disk || req_no_special_merge(rq))
return false; return false;
/* only merge integrity protected bio into ditto rq */ /* only merge integrity protected bio into ditto rq */

View File

@ -116,7 +116,7 @@ int blkdev_report_zones(struct block_device *bdev,
if (!bio) if (!bio)
return -ENOMEM; return -ENOMEM;
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = blk_zone_start(q, sector); bio->bi_iter.bi_sector = blk_zone_start(q, sector);
bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0); bio_set_op_attrs(bio, REQ_OP_ZONE_REPORT, 0);
@ -234,7 +234,7 @@ int blkdev_reset_zones(struct block_device *bdev,
bio = bio_alloc(gfp_mask, 0); bio = bio_alloc(gfp_mask, 0);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0); bio_set_op_attrs(bio, REQ_OP_ZONE_RESET, 0);
ret = submit_bio_wait(bio); ret = submit_bio_wait(bio);

View File

@ -294,14 +294,13 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio) static blk_qc_t brd_make_request(struct request_queue *q, struct bio *bio)
{ {
struct block_device *bdev = bio->bi_bdev; struct brd_device *brd = bio->bi_disk->private_data;
struct brd_device *brd = bdev->bd_disk->private_data;
struct bio_vec bvec; struct bio_vec bvec;
sector_t sector; sector_t sector;
struct bvec_iter iter; struct bvec_iter iter;
sector = bio->bi_iter.bi_sector; sector = bio->bi_iter.bi_sector;
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) if (bio_end_sector(bio) > get_capacity(bio->bi_disk))
goto io_error; goto io_error;
bio_for_each_segment(bvec, bio, iter) { bio_for_each_segment(bvec, bio, iter) {

View File

@ -151,7 +151,7 @@ static int _drbd_md_sync_page_io(struct drbd_device *device,
op_flags |= REQ_SYNC; op_flags |= REQ_SYNC;
bio = bio_alloc_drbd(GFP_NOIO); bio = bio_alloc_drbd(GFP_NOIO);
bio->bi_bdev = bdev->md_bdev; bio_set_dev(bio, bdev->md_bdev);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
err = -EIO; err = -EIO;
if (bio_add_page(bio, device->md_io.page, size, 0) != size) if (bio_add_page(bio, device->md_io.page, size, 0) != size)

View File

@ -1019,7 +1019,7 @@ static void bm_page_io_async(struct drbd_bm_aio_ctx *ctx, int page_nr) __must_ho
bm_store_page_idx(page, page_nr); bm_store_page_idx(page, page_nr);
} else } else
page = b->bm_pages[page_nr]; page = b->bm_pages[page_nr];
bio->bi_bdev = device->ldev->md_bdev; bio_set_dev(bio, device->ldev->md_bdev);
bio->bi_iter.bi_sector = on_disk_sector; bio->bi_iter.bi_sector = on_disk_sector;
/* bio_add_page of a single page to an empty bio will always succeed, /* bio_add_page of a single page to an empty bio will always succeed,
* according to api. Do we want to assert that? */ * according to api. Do we want to assert that? */

View File

@ -1628,8 +1628,8 @@ static inline void drbd_generic_make_request(struct drbd_device *device,
int fault_type, struct bio *bio) int fault_type, struct bio *bio)
{ {
__release(local); __release(local);
if (!bio->bi_bdev) { if (!bio->bi_disk) {
drbd_err(device, "drbd_generic_make_request: bio->bi_bdev == NULL\n"); drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
bio->bi_status = BLK_STS_IOERR; bio->bi_status = BLK_STS_IOERR;
bio_endio(bio); bio_endio(bio);
return; return;

View File

@ -1265,7 +1265,7 @@ static void submit_one_flush(struct drbd_device *device, struct issue_flush_cont
octx->device = device; octx->device = device;
octx->ctx = ctx; octx->ctx = ctx;
bio->bi_bdev = device->ldev->backing_bdev; bio_set_dev(bio, device->ldev->backing_bdev);
bio->bi_private = octx; bio->bi_private = octx;
bio->bi_end_io = one_flush_endio; bio->bi_end_io = one_flush_endio;
bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH; bio->bi_opf = REQ_OP_FLUSH | REQ_PREFLUSH;
@ -1548,7 +1548,7 @@ int drbd_submit_peer_request(struct drbd_device *device,
} }
/* > peer_req->i.sector, unless this is the first bio */ /* > peer_req->i.sector, unless this is the first bio */
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = device->ldev->backing_bdev; bio_set_dev(bio, device->ldev->backing_bdev);
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);
bio->bi_private = peer_req; bio->bi_private = peer_req;
bio->bi_end_io = drbd_peer_request_endio; bio->bi_end_io = drbd_peer_request_endio;

View File

@ -1179,7 +1179,7 @@ drbd_submit_req_private_bio(struct drbd_request *req)
else else
type = DRBD_FAULT_DT_RD; type = DRBD_FAULT_DT_RD;
bio->bi_bdev = device->ldev->backing_bdev; bio_set_dev(bio, device->ldev->backing_bdev);
/* State may have changed since we grabbed our reference on the /* State may have changed since we grabbed our reference on the
* ->ldev member. Double check, and short-circuit to endio. * ->ldev member. Double check, and short-circuit to endio.

View File

@ -1513,7 +1513,7 @@ int w_restart_disk_io(struct drbd_work *w, int cancel)
drbd_al_begin_io(device, &req->i); drbd_al_begin_io(device, &req->i);
drbd_req_make_private_bio(req, req->master_bio); drbd_req_make_private_bio(req, req->master_bio);
req->private_bio->bi_bdev = device->ldev->backing_bdev; bio_set_dev(req->private_bio, device->ldev->backing_bdev);
generic_make_request(req->private_bio); generic_make_request(req->private_bio);
return 0; return 0;

View File

@ -4134,7 +4134,7 @@ static int __floppy_read_block_0(struct block_device *bdev, int drive)
cbdata.drive = drive; cbdata.drive = drive;
bio_init(&bio, &bio_vec, 1); bio_init(&bio, &bio_vec, 1);
bio.bi_bdev = bdev; bio_set_dev(&bio, bdev);
bio_add_page(&bio, page, size, 0); bio_add_page(&bio, page, size, 0);
bio.bi_iter.bi_sector = 0; bio.bi_iter.bi_sector = 0;

View File

@ -1028,7 +1028,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
bio = pkt->r_bios[f]; bio = pkt->r_bios[f];
bio_reset(bio); bio_reset(bio);
bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_bdev = pd->bdev; bio_set_dev(bio, pd->bdev);
bio->bi_end_io = pkt_end_io_read; bio->bi_end_io = pkt_end_io_read;
bio->bi_private = pkt; bio->bi_private = pkt;
@ -1122,7 +1122,7 @@ static int pkt_start_recovery(struct packet_data *pkt)
pkt->sector = new_sector; pkt->sector = new_sector;
bio_reset(pkt->bio); bio_reset(pkt->bio);
pkt->bio->bi_bdev = pd->bdev; bio_set_dev(pkt->bio, pd->bdev);
bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0); bio_set_op_attrs(pkt->bio, REQ_OP_WRITE, 0);
pkt->bio->bi_iter.bi_sector = new_sector; pkt->bio->bi_iter.bi_sector = new_sector;
pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE; pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
@ -1267,7 +1267,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
bio_reset(pkt->w_bio); bio_reset(pkt->w_bio);
pkt->w_bio->bi_iter.bi_sector = pkt->sector; pkt->w_bio->bi_iter.bi_sector = pkt->sector;
pkt->w_bio->bi_bdev = pd->bdev; bio_set_dev(pkt->w_bio, pd->bdev);
pkt->w_bio->bi_end_io = pkt_end_io_packet_write; pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
pkt->w_bio->bi_private = pkt; pkt->w_bio->bi_private = pkt;
@ -2314,7 +2314,7 @@ static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
psd->pd = pd; psd->pd = pd;
psd->bio = bio; psd->bio = bio;
cloned_bio->bi_bdev = pd->bdev; bio_set_dev(cloned_bio, pd->bdev);
cloned_bio->bi_private = psd; cloned_bio->bi_private = psd;
cloned_bio->bi_end_io = pkt_end_io_read_cloned; cloned_bio->bi_end_io = pkt_end_io_read_cloned;
pd->stats.secs_r += bio_sectors(bio); pd->stats.secs_r += bio_sectors(bio);
@ -2415,8 +2415,7 @@ static blk_qc_t pkt_make_request(struct request_queue *q, struct bio *bio)
pd = q->queuedata; pd = q->queuedata;
if (!pd) { if (!pd) {
pr_err("%s incorrect request queue\n", pr_err("%s incorrect request queue\n", bio_devname(bio, b));
bdevname(bio->bi_bdev, b));
goto end_io; goto end_io;
} }

View File

@ -1363,7 +1363,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio; goto fail_put_bio;
biolist[nbio++] = bio; biolist[nbio++] = bio;
bio->bi_bdev = preq.bdev; bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req; bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op; bio->bi_end_io = end_block_io_op;
bio->bi_iter.bi_sector = preq.sector_number; bio->bi_iter.bi_sector = preq.sector_number;
@ -1382,7 +1382,7 @@ static int dispatch_rw_block_io(struct xen_blkif_ring *ring,
goto fail_put_bio; goto fail_put_bio;
biolist[nbio++] = bio; biolist[nbio++] = bio;
bio->bi_bdev = preq.bdev; bio_set_dev(bio, preq.bdev);
bio->bi_private = pending_req; bio->bi_private = pending_req;
bio->bi_end_io = end_block_io_op; bio->bi_end_io = end_block_io_op;
bio_set_op_attrs(bio, operation, operation_flags); bio_set_op_attrs(bio, operation, operation_flags);

View File

@ -49,7 +49,7 @@ void bch_btree_verify(struct btree *b)
v->keys.ops = b->keys.ops; v->keys.ops = b->keys.ops;
bio = bch_bbio_alloc(b->c); bio = bch_bbio_alloc(b->c);
bio->bi_bdev = PTR_CACHE(b->c, &b->key, 0)->bdev; bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9; bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
bio->bi_opf = REQ_OP_READ | REQ_META; bio->bi_opf = REQ_OP_READ | REQ_META;

View File

@ -34,7 +34,7 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
struct bbio *b = container_of(bio, struct bbio, bio); struct bbio *b = container_of(bio, struct bbio, bio);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
b->submit_time_us = local_clock_us(); b->submit_time_us = local_clock_us();
closure_bio_submit(bio, bio->bi_private); closure_bio_submit(bio, bio->bi_private);

View File

@ -53,7 +53,7 @@ reread: left = ca->sb.bucket_size - offset;
bio_reset(bio); bio_reset(bio);
bio->bi_iter.bi_sector = bucket + offset; bio->bi_iter.bi_sector = bucket + offset;
bio->bi_bdev = ca->bdev; bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = len << 9; bio->bi_iter.bi_size = len << 9;
bio->bi_end_io = journal_read_endio; bio->bi_end_io = journal_read_endio;
@ -452,7 +452,7 @@ static void do_journal_discard(struct cache *ca)
bio_set_op_attrs(bio, REQ_OP_DISCARD, 0); bio_set_op_attrs(bio, REQ_OP_DISCARD, 0);
bio->bi_iter.bi_sector = bucket_to_sector(ca->set, bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
ca->sb.d[ja->discard_idx]); ca->sb.d[ja->discard_idx]);
bio->bi_bdev = ca->bdev; bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = journal_discard_endio; bio->bi_end_io = journal_discard_endio;
@ -623,7 +623,7 @@ static void journal_write_unlocked(struct closure *cl)
bio_reset(bio); bio_reset(bio);
bio->bi_iter.bi_sector = PTR_OFFSET(k, i); bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
bio->bi_bdev = ca->bdev; bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = sectors << 9; bio->bi_iter.bi_size = sectors << 9;
bio->bi_end_io = journal_write_endio; bio->bi_end_io = journal_write_endio;

View File

@ -607,7 +607,7 @@ static void request_endio(struct bio *bio)
static void bio_complete(struct search *s) static void bio_complete(struct search *s)
{ {
if (s->orig_bio) { if (s->orig_bio) {
struct request_queue *q = bdev_get_queue(s->orig_bio->bi_bdev); struct request_queue *q = s->orig_bio->bi_disk->queue;
generic_end_io_acct(q, bio_data_dir(s->orig_bio), generic_end_io_acct(q, bio_data_dir(s->orig_bio),
&s->d->disk->part0, s->start_time); &s->d->disk->part0, s->start_time);
@ -735,7 +735,7 @@ static void cached_dev_read_done(struct closure *cl)
if (s->iop.bio) { if (s->iop.bio) {
bio_reset(s->iop.bio); bio_reset(s->iop.bio);
s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; bio_copy_dev(s->iop.bio, s->cache_miss);
s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
bch_bio_map(s->iop.bio, NULL); bch_bio_map(s->iop.bio, NULL);
@ -794,7 +794,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
!(bio->bi_opf & REQ_META) && !(bio->bi_opf & REQ_META) &&
s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA) s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
reada = min_t(sector_t, dc->readahead >> 9, reada = min_t(sector_t, dc->readahead >> 9,
bdev_sectors(bio->bi_bdev) - bio_end_sector(bio)); get_capacity(bio->bi_disk) - bio_end_sector(bio));
s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
@ -820,7 +820,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
goto out_submit; goto out_submit;
cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
cache_bio->bi_bdev = miss->bi_bdev; bio_copy_dev(cache_bio, miss);
cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
cache_bio->bi_end_io = request_endio; cache_bio->bi_end_io = request_endio;
@ -919,7 +919,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0, struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
dc->disk.bio_split); dc->disk.bio_split);
flush->bi_bdev = bio->bi_bdev; bio_copy_dev(flush, bio);
flush->bi_end_io = request_endio; flush->bi_end_io = request_endio;
flush->bi_private = cl; flush->bi_private = cl;
flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
@ -956,13 +956,13 @@ static blk_qc_t cached_dev_make_request(struct request_queue *q,
struct bio *bio) struct bio *bio)
{ {
struct search *s; struct search *s;
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; struct bcache_device *d = bio->bi_disk->private_data;
struct cached_dev *dc = container_of(d, struct cached_dev, disk); struct cached_dev *dc = container_of(d, struct cached_dev, disk);
int rw = bio_data_dir(bio); int rw = bio_data_dir(bio);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);
bio->bi_bdev = dc->bdev; bio_set_dev(bio, dc->bdev);
bio->bi_iter.bi_sector += dc->sb.data_offset; bio->bi_iter.bi_sector += dc->sb.data_offset;
if (cached_dev_get(dc)) { if (cached_dev_get(dc)) {
@ -1072,7 +1072,7 @@ static blk_qc_t flash_dev_make_request(struct request_queue *q,
{ {
struct search *s; struct search *s;
struct closure *cl; struct closure *cl;
struct bcache_device *d = bio->bi_bdev->bd_disk->private_data; struct bcache_device *d = bio->bi_disk->private_data;
int rw = bio_data_dir(bio); int rw = bio_data_dir(bio);
generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0); generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

View File

@ -257,7 +257,7 @@ void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
closure_init(cl, parent); closure_init(cl, parent);
bio_reset(bio); bio_reset(bio);
bio->bi_bdev = dc->bdev; bio_set_dev(bio, dc->bdev);
bio->bi_end_io = write_bdev_super_endio; bio->bi_end_io = write_bdev_super_endio;
bio->bi_private = dc; bio->bi_private = dc;
@ -303,7 +303,7 @@ void bcache_write_super(struct cache_set *c)
SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb)); SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
bio_reset(bio); bio_reset(bio);
bio->bi_bdev = ca->bdev; bio_set_dev(bio, ca->bdev);
bio->bi_end_io = write_super_endio; bio->bi_end_io = write_super_endio;
bio->bi_private = ca; bio->bi_private = ca;
@ -508,7 +508,7 @@ static void prio_io(struct cache *ca, uint64_t bucket, int op,
closure_init_stack(cl); closure_init_stack(cl);
bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
bio->bi_bdev = ca->bdev; bio_set_dev(bio, ca->bdev);
bio->bi_iter.bi_size = bucket_bytes(ca); bio->bi_iter.bi_size = bucket_bytes(ca);
bio->bi_end_io = prio_endio; bio->bi_end_io = prio_endio;

View File

@ -181,7 +181,7 @@ static void write_dirty(struct closure *cl)
dirty_init(w); dirty_init(w);
bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0); bio_set_op_attrs(&io->bio, REQ_OP_WRITE, 0);
io->bio.bi_iter.bi_sector = KEY_START(&w->key); io->bio.bi_iter.bi_sector = KEY_START(&w->key);
io->bio.bi_bdev = io->dc->bdev; bio_set_dev(&io->bio, io->dc->bdev);
io->bio.bi_end_io = dirty_endio; io->bio.bi_end_io = dirty_endio;
closure_bio_submit(&io->bio, cl); closure_bio_submit(&io->bio, cl);
@ -250,8 +250,7 @@ static void read_dirty(struct cached_dev *dc)
dirty_init(w); dirty_init(w);
bio_set_op_attrs(&io->bio, REQ_OP_READ, 0); bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
io->bio.bi_bdev = PTR_CACHE(dc->disk.c, bio_set_dev(&io->bio, PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
&w->key, 0)->bdev;
io->bio.bi_end_io = read_dirty_endio; io->bio.bi_end_io = read_dirty_endio;
if (bio_alloc_pages(&io->bio, GFP_KERNEL)) if (bio_alloc_pages(&io->bio, GFP_KERNEL))

View File

@ -18,21 +18,24 @@
*/ */
struct dm_bio_details { struct dm_bio_details {
struct block_device *bi_bdev; struct gendisk *bi_disk;
u8 bi_partno;
unsigned long bi_flags; unsigned long bi_flags;
struct bvec_iter bi_iter; struct bvec_iter bi_iter;
}; };
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
{ {
bd->bi_bdev = bio->bi_bdev; bd->bi_disk = bio->bi_disk;
bd->bi_partno = bio->bi_partno;
bd->bi_flags = bio->bi_flags; bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter; bd->bi_iter = bio->bi_iter;
} }
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
{ {
bio->bi_bdev = bd->bi_bdev; bio->bi_disk = bd->bi_disk;
bio->bi_partno = bd->bi_partno;
bio->bi_flags = bd->bi_flags; bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter; bio->bi_iter = bd->bi_iter;
} }

View File

@ -616,7 +616,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS); bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
b->bio.bi_iter.bi_sector = sector; b->bio.bi_iter.bi_sector = sector;
b->bio.bi_bdev = b->c->bdev; bio_set_dev(&b->bio, b->c->bdev);
b->bio.bi_end_io = inline_endio; b->bio.bi_end_io = inline_endio;
/* /*
* Use of .bi_private isn't a problem here because * Use of .bi_private isn't a problem here because

View File

@ -833,7 +833,7 @@ static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
*--------------------------------------------------------------*/ *--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio) static void remap_to_origin(struct cache *cache, struct bio *bio)
{ {
bio->bi_bdev = cache->origin_dev->bdev; bio_set_dev(bio, cache->origin_dev->bdev);
} }
static void remap_to_cache(struct cache *cache, struct bio *bio, static void remap_to_cache(struct cache *cache, struct bio *bio,
@ -842,7 +842,7 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
sector_t bi_sector = bio->bi_iter.bi_sector; sector_t bi_sector = bio->bi_iter.bi_sector;
sector_t block = from_cblock(cblock); sector_t block = from_cblock(cblock);
bio->bi_bdev = cache->cache_dev->bdev; bio_set_dev(bio, cache->cache_dev->bdev);
if (!block_size_is_power_of_two(cache)) if (!block_size_is_power_of_two(cache))
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
(block * cache->sectors_per_block) + (block * cache->sectors_per_block) +

View File

@ -1544,7 +1544,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io; clone->bi_private = io;
clone->bi_end_io = crypt_endio; clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev; bio_set_dev(clone, cc->dev->bdev);
clone->bi_opf = io->base_bio->bi_opf; clone->bi_opf = io->base_bio->bi_opf;
} }
@ -2793,7 +2793,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
*/ */
if (unlikely(bio->bi_opf & REQ_PREFLUSH || if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
bio_op(bio) == REQ_OP_DISCARD)) { bio_op(bio) == REQ_OP_DISCARD)) {
bio->bi_bdev = cc->dev->bdev; bio_set_dev(bio, cc->dev->bdev);
if (bio_sectors(bio)) if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start + bio->bi_iter.bi_sector = cc->start +
dm_target_offset(ti, bio->bi_iter.bi_sector); dm_target_offset(ti, bio->bi_iter.bi_sector);

View File

@ -282,7 +282,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
struct delay_c *dc = ti->private; struct delay_c *dc = ti->private;
if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
bio->bi_bdev = dc->dev_write->bdev; bio_set_dev(bio, dc->dev_write->bdev);
if (bio_sectors(bio)) if (bio_sectors(bio))
bio->bi_iter.bi_sector = dc->start_write + bio->bi_iter.bi_sector = dc->start_write +
dm_target_offset(ti, bio->bi_iter.bi_sector); dm_target_offset(ti, bio->bi_iter.bi_sector);
@ -290,7 +290,7 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
return delay_bio(dc, dc->write_delay, bio); return delay_bio(dc, dc->write_delay, bio);
} }
bio->bi_bdev = dc->dev_read->bdev; bio_set_dev(bio, dc->dev_read->bdev);
bio->bi_iter.bi_sector = dc->start_read + bio->bi_iter.bi_sector = dc->start_read +
dm_target_offset(ti, bio->bi_iter.bi_sector); dm_target_offset(ti, bio->bi_iter.bi_sector);

View File

@ -1192,7 +1192,7 @@ static dm_block_t get_block(struct era *era, struct bio *bio)
static void remap_to_origin(struct era *era, struct bio *bio) static void remap_to_origin(struct era *era, struct bio *bio)
{ {
bio->bi_bdev = era->origin_dev->bdev; bio_set_dev(bio, era->origin_dev->bdev);
} }
/*---------------------------------------------------------------- /*----------------------------------------------------------------

View File

@ -274,7 +274,7 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
{ {
struct flakey_c *fc = ti->private; struct flakey_c *fc = ti->private;
bio->bi_bdev = fc->dev->bdev; bio_set_dev(bio, fc->dev->bdev);
if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
flakey_map_sector(ti, bio->bi_iter.bi_sector); flakey_map_sector(ti, bio->bi_iter.bi_sector);

View File

@ -250,7 +250,8 @@ struct dm_integrity_io {
struct completion *completion; struct completion *completion;
struct block_device *orig_bi_bdev; struct gendisk *orig_bi_disk;
u8 orig_bi_partno;
bio_end_io_t *orig_bi_end_io; bio_end_io_t *orig_bi_end_io;
struct bio_integrity_payload *orig_bi_integrity; struct bio_integrity_payload *orig_bi_integrity;
struct bvec_iter orig_bi_iter; struct bvec_iter orig_bi_iter;
@ -1164,7 +1165,8 @@ static void integrity_end_io(struct bio *bio)
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io)); struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
bio->bi_iter = dio->orig_bi_iter; bio->bi_iter = dio->orig_bi_iter;
bio->bi_bdev = dio->orig_bi_bdev; bio->bi_disk = dio->orig_bi_disk;
bio->bi_partno = dio->orig_bi_partno;
if (dio->orig_bi_integrity) { if (dio->orig_bi_integrity) {
bio->bi_integrity = dio->orig_bi_integrity; bio->bi_integrity = dio->orig_bi_integrity;
bio->bi_opf |= REQ_INTEGRITY; bio->bi_opf |= REQ_INTEGRITY;
@ -1681,8 +1683,9 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
dio->orig_bi_iter = bio->bi_iter; dio->orig_bi_iter = bio->bi_iter;
dio->orig_bi_bdev = bio->bi_bdev; dio->orig_bi_disk = bio->bi_disk;
bio->bi_bdev = ic->dev->bdev; dio->orig_bi_partno = bio->bi_partno;
bio_set_dev(bio, ic->dev->bdev);
dio->orig_bi_integrity = bio_integrity(bio); dio->orig_bi_integrity = bio_integrity(bio);
bio->bi_integrity = NULL; bio->bi_integrity = NULL;

View File

@ -347,7 +347,7 @@ static void do_region(int op, int op_flags, unsigned region,
bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
bio->bi_iter.bi_sector = where->sector + (where->count - remaining); bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev; bio_set_dev(bio, where->bdev);
bio->bi_end_io = endio; bio->bi_end_io = endio;
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);
store_io_and_region_in_bio(bio, io, region); store_io_and_region_in_bio(bio, io, region);

View File

@ -88,7 +88,7 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
{ {
struct linear_c *lc = ti->private; struct linear_c *lc = ti->private;
bio->bi_bdev = lc->dev->bdev; bio_set_dev(bio, lc->dev->bdev);
if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET) if (bio_sectors(bio) || bio_op(bio) == REQ_OP_ZONE_RESET)
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
linear_map_sector(ti, bio->bi_iter.bi_sector); linear_map_sector(ti, bio->bi_iter.bi_sector);

View File

@ -198,7 +198,7 @@ static int write_metadata(struct log_writes_c *lc, void *entry,
} }
bio->bi_iter.bi_size = 0; bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = lc->logdev->bdev; bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io; bio->bi_end_io = log_end_io;
bio->bi_private = lc; bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@ -263,7 +263,7 @@ static int log_one_block(struct log_writes_c *lc,
} }
bio->bi_iter.bi_size = 0; bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = lc->logdev->bdev; bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io; bio->bi_end_io = log_end_io;
bio->bi_private = lc; bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@ -285,7 +285,7 @@ static int log_one_block(struct log_writes_c *lc,
} }
bio->bi_iter.bi_size = 0; bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = lc->logdev->bdev; bio_set_dev(bio, lc->logdev->bdev);
bio->bi_end_io = log_end_io; bio->bi_end_io = log_end_io;
bio->bi_private = lc; bio->bi_private = lc;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@ -539,7 +539,7 @@ static void normal_map_bio(struct dm_target *ti, struct bio *bio)
{ {
struct log_writes_c *lc = ti->private; struct log_writes_c *lc = ti->private;
bio->bi_bdev = lc->dev->bdev; bio_set_dev(bio, lc->dev->bdev);
} }
static int log_writes_map(struct dm_target *ti, struct bio *bio) static int log_writes_map(struct dm_target *ti, struct bio *bio)

View File

@ -566,7 +566,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m
mpio->nr_bytes = nr_bytes; mpio->nr_bytes = nr_bytes;
bio->bi_status = 0; bio->bi_status = 0;
bio->bi_bdev = pgpath->path.dev->bdev; bio_set_dev(bio, pgpath->path.dev->bdev);
bio->bi_opf |= REQ_FAILFAST_TRANSPORT; bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
if (pgpath->pg->ps.type->start_io) if (pgpath->pg->ps.type->start_io)

View File

@ -145,7 +145,7 @@ static void dispatch_bios(void *context, struct bio_list *bio_list)
struct dm_raid1_bio_record { struct dm_raid1_bio_record {
struct mirror *m; struct mirror *m;
/* if details->bi_bdev == NULL, details were not saved */ /* if details->bi_disk == NULL, details were not saved */
struct dm_bio_details details; struct dm_bio_details details;
region_t write_region; region_t write_region;
}; };
@ -464,7 +464,7 @@ static sector_t map_sector(struct mirror *m, struct bio *bio)
static void map_bio(struct mirror *m, struct bio *bio) static void map_bio(struct mirror *m, struct bio *bio)
{ {
bio->bi_bdev = m->dev->bdev; bio_set_dev(bio, m->dev->bdev);
bio->bi_iter.bi_sector = map_sector(m, bio); bio->bi_iter.bi_sector = map_sector(m, bio);
} }
@ -1199,7 +1199,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
struct dm_raid1_bio_record *bio_record = struct dm_raid1_bio_record *bio_record =
dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record)); dm_per_bio_data(bio, sizeof(struct dm_raid1_bio_record));
bio_record->details.bi_bdev = NULL; bio_record->details.bi_disk = NULL;
if (rw == WRITE) { if (rw == WRITE) {
/* Save region for mirror_end_io() handler */ /* Save region for mirror_end_io() handler */
@ -1266,7 +1266,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
goto out; goto out;
if (unlikely(*error)) { if (unlikely(*error)) {
if (!bio_record->details.bi_bdev) { if (!bio_record->details.bi_disk) {
/* /*
* There wasn't enough memory to record necessary * There wasn't enough memory to record necessary
* information for a retry or there was no other * information for a retry or there was no other
@ -1291,7 +1291,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
bd = &bio_record->details; bd = &bio_record->details;
dm_bio_restore(bd, bio); dm_bio_restore(bd, bio);
bio_record->details.bi_bdev = NULL; bio_record->details.bi_disk = NULL;
bio->bi_status = 0; bio->bi_status = 0;
queue_bio(ms, bio, rw); queue_bio(ms, bio, rw);
@ -1301,7 +1301,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
} }
out: out:
bio_record->details.bi_bdev = NULL; bio_record->details.bi_disk = NULL;
return DM_ENDIO_DONE; return DM_ENDIO_DONE;
} }

View File

@ -1663,7 +1663,7 @@ __find_pending_exception(struct dm_snapshot *s,
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
struct bio *bio, chunk_t chunk) struct bio *bio, chunk_t chunk)
{ {
bio->bi_bdev = s->cow->bdev; bio_set_dev(bio, s->cow->bdev);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
(chunk - e->old_chunk)) + (chunk - e->old_chunk)) +
@ -1681,7 +1681,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
init_tracked_chunk(bio); init_tracked_chunk(bio);
if (bio->bi_opf & REQ_PREFLUSH) { if (bio->bi_opf & REQ_PREFLUSH) {
bio->bi_bdev = s->cow->bdev; bio_set_dev(bio, s->cow->bdev);
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
@ -1769,7 +1769,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
goto out; goto out;
} }
} else { } else {
bio->bi_bdev = s->origin->bdev; bio_set_dev(bio, s->origin->bdev);
track_chunk(s, bio, chunk); track_chunk(s, bio, chunk);
} }
@ -1802,9 +1802,9 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
if (bio->bi_opf & REQ_PREFLUSH) { if (bio->bi_opf & REQ_PREFLUSH) {
if (!dm_bio_get_target_bio_nr(bio)) if (!dm_bio_get_target_bio_nr(bio))
bio->bi_bdev = s->origin->bdev; bio_set_dev(bio, s->origin->bdev);
else else
bio->bi_bdev = s->cow->bdev; bio_set_dev(bio, s->cow->bdev);
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
@ -1824,7 +1824,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
chunk >= s->first_merging_chunk && chunk >= s->first_merging_chunk &&
chunk < (s->first_merging_chunk + chunk < (s->first_merging_chunk +
s->num_merging_chunks)) { s->num_merging_chunks)) {
bio->bi_bdev = s->origin->bdev; bio_set_dev(bio, s->origin->bdev);
bio_list_add(&s->bios_queued_during_merge, bio); bio_list_add(&s->bios_queued_during_merge, bio);
r = DM_MAPIO_SUBMITTED; r = DM_MAPIO_SUBMITTED;
goto out_unlock; goto out_unlock;
@ -1838,7 +1838,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
} }
redirect_to_origin: redirect_to_origin:
bio->bi_bdev = s->origin->bdev; bio_set_dev(bio, s->origin->bdev);
if (bio_data_dir(bio) == WRITE) { if (bio_data_dir(bio) == WRITE) {
up_write(&s->lock); up_write(&s->lock);
@ -2285,7 +2285,7 @@ static int origin_map(struct dm_target *ti, struct bio *bio)
struct dm_origin *o = ti->private; struct dm_origin *o = ti->private;
unsigned available_sectors; unsigned available_sectors;
bio->bi_bdev = o->dev->bdev; bio_set_dev(bio, o->dev->bdev);
if (unlikely(bio->bi_opf & REQ_PREFLUSH)) if (unlikely(bio->bi_opf & REQ_PREFLUSH))
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;

View File

@ -270,7 +270,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
stripe_map_range_sector(sc, bio_end_sector(bio), stripe_map_range_sector(sc, bio_end_sector(bio),
target_stripe, &end); target_stripe, &end);
if (begin < end) { if (begin < end) {
bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; bio_set_dev(bio, sc->stripe[target_stripe].dev->bdev);
bio->bi_iter.bi_sector = begin + bio->bi_iter.bi_sector = begin +
sc->stripe[target_stripe].physical_start; sc->stripe[target_stripe].physical_start;
bio->bi_iter.bi_size = to_bytes(end - begin); bio->bi_iter.bi_size = to_bytes(end - begin);
@ -291,7 +291,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
if (bio->bi_opf & REQ_PREFLUSH) { if (bio->bi_opf & REQ_PREFLUSH) {
target_bio_nr = dm_bio_get_target_bio_nr(bio); target_bio_nr = dm_bio_get_target_bio_nr(bio);
BUG_ON(target_bio_nr >= sc->stripes); BUG_ON(target_bio_nr >= sc->stripes);
bio->bi_bdev = sc->stripe[target_bio_nr].dev->bdev; bio_set_dev(bio, sc->stripe[target_bio_nr].dev->bdev);
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
if (unlikely(bio_op(bio) == REQ_OP_DISCARD) || if (unlikely(bio_op(bio) == REQ_OP_DISCARD) ||
@ -306,7 +306,7 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
&stripe, &bio->bi_iter.bi_sector); &stripe, &bio->bi_iter.bi_sector);
bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
bio->bi_bdev = sc->stripe[stripe].dev->bdev; bio_set_dev(bio, sc->stripe[stripe].dev->bdev);
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;
} }
@ -430,9 +430,7 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
return DM_ENDIO_DONE; return DM_ENDIO_DONE;
memset(major_minor, 0, sizeof(major_minor)); memset(major_minor, 0, sizeof(major_minor));
sprintf(major_minor, "%d:%d", sprintf(major_minor, "%d:%d", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)));
MAJOR(disk_devt(bio->bi_bdev->bd_disk)),
MINOR(disk_devt(bio->bi_bdev->bd_disk)));
/* /*
* Test to see which stripe drive triggered the event * Test to see which stripe drive triggered the event

View File

@ -322,7 +322,7 @@ static int switch_map(struct dm_target *ti, struct bio *bio)
sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
unsigned path_nr = switch_get_path_nr(sctx, offset); unsigned path_nr = switch_get_path_nr(sctx, offset);
bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; bio_set_dev(bio, sctx->path_list[path_nr].dmdev->bdev);
bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;

View File

@ -679,7 +679,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
struct pool *pool = tc->pool; struct pool *pool = tc->pool;
sector_t bi_sector = bio->bi_iter.bi_sector; sector_t bi_sector = bio->bi_iter.bi_sector;
bio->bi_bdev = tc->pool_dev->bdev; bio_set_dev(bio, tc->pool_dev->bdev);
if (block_size_is_power_of_two(pool)) if (block_size_is_power_of_two(pool))
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
(block << pool->sectors_per_block_shift) | (block << pool->sectors_per_block_shift) |
@ -691,7 +691,7 @@ static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
static void remap_to_origin(struct thin_c *tc, struct bio *bio) static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{ {
bio->bi_bdev = tc->origin_dev->bdev; bio_set_dev(bio, tc->origin_dev->bdev);
} }
static int bio_triggers_commit(struct thin_c *tc, struct bio *bio) static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
@ -3313,7 +3313,7 @@ static int pool_map(struct dm_target *ti, struct bio *bio)
* As this is a singleton target, ti->begin is always zero. * As this is a singleton target, ti->begin is always zero.
*/ */
spin_lock_irqsave(&pool->lock, flags); spin_lock_irqsave(&pool->lock, flags);
bio->bi_bdev = pt->data_dev->bdev; bio_set_dev(bio, pt->data_dev->bdev);
r = DM_MAPIO_REMAPPED; r = DM_MAPIO_REMAPPED;
spin_unlock_irqrestore(&pool->lock, flags); spin_unlock_irqrestore(&pool->lock, flags);

View File

@ -637,7 +637,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
struct dm_verity *v = ti->private; struct dm_verity *v = ti->private;
struct dm_verity_io *io; struct dm_verity_io *io;
bio->bi_bdev = v->data_dev->bdev; bio_set_dev(bio, v->data_dev->bdev);
bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &

View File

@ -409,7 +409,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
} }
bio->bi_iter.bi_sector = dmz_blk2sect(block); bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio->bi_bdev = zmd->dev->bdev; bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk; bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io; bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO); bio_set_op_attrs(bio, REQ_OP_READ, REQ_META | REQ_PRIO);
@ -564,7 +564,7 @@ static void dmz_write_mblock(struct dmz_metadata *zmd, struct dmz_mblock *mblk,
set_bit(DMZ_META_WRITING, &mblk->state); set_bit(DMZ_META_WRITING, &mblk->state);
bio->bi_iter.bi_sector = dmz_blk2sect(block); bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio->bi_bdev = zmd->dev->bdev; bio_set_dev(bio, zmd->dev->bdev);
bio->bi_private = mblk; bio->bi_private = mblk;
bio->bi_end_io = dmz_mblock_bio_end_io; bio->bi_end_io = dmz_mblock_bio_end_io;
bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO); bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_META | REQ_PRIO);
@ -586,7 +586,7 @@ static int dmz_rdwr_block(struct dmz_metadata *zmd, int op, sector_t block,
return -ENOMEM; return -ENOMEM;
bio->bi_iter.bi_sector = dmz_blk2sect(block); bio->bi_iter.bi_sector = dmz_blk2sect(block);
bio->bi_bdev = zmd->dev->bdev; bio_set_dev(bio, zmd->dev->bdev);
bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO); bio_set_op_attrs(bio, op, REQ_SYNC | REQ_META | REQ_PRIO);
bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0); bio_add_page(bio, page, DMZ_BLOCK_SIZE, 0);
ret = submit_bio_wait(bio); ret = submit_bio_wait(bio);

View File

@ -238,7 +238,7 @@ static void dmz_submit_write_bio(struct dmz_target *dmz, struct dm_zone *zone,
struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx)); struct dmz_bioctx *bioctx = dm_per_bio_data(bio, sizeof(struct dmz_bioctx));
/* Setup and submit the BIO */ /* Setup and submit the BIO */
bio->bi_bdev = dmz->dev->bdev; bio_set_dev(bio, dmz->dev->bdev);
bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block); bio->bi_iter.bi_sector = dmz_start_sect(dmz->metadata, zone) + dmz_blk2sect(chunk_block);
atomic_inc(&bioctx->ref); atomic_inc(&bioctx->ref);
generic_make_request(bio); generic_make_request(bio);
@ -586,7 +586,7 @@ static int dmz_map(struct dm_target *ti, struct bio *bio)
(unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)), (unsigned long long)dmz_chunk_block(dmz->dev, dmz_bio_block(bio)),
(unsigned int)dmz_bio_blocks(bio)); (unsigned int)dmz_bio_blocks(bio));
bio->bi_bdev = dev->bdev; bio_set_dev(bio, dev->bdev);
if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE) if (!nr_sectors && bio_op(bio) != REQ_OP_WRITE)
return DM_MAPIO_REMAPPED; return DM_MAPIO_REMAPPED;

View File

@ -851,10 +851,10 @@ static void clone_endio(struct bio *bio)
if (unlikely(error == BLK_STS_TARGET)) { if (unlikely(error == BLK_STS_TARGET)) {
if (bio_op(bio) == REQ_OP_WRITE_SAME && if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) !bio->bi_disk->queue->limits.max_write_same_sectors)
disable_write_same(md); disable_write_same(md);
if (bio_op(bio) == REQ_OP_WRITE_ZEROES && if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
disable_write_zeroes(md); disable_write_zeroes(md);
} }
@ -1215,8 +1215,8 @@ static void __map_bio(struct dm_target_io *tio)
break; break;
case DM_MAPIO_REMAPPED: case DM_MAPIO_REMAPPED:
/* the bio has been remapped so dispatch it */ /* the bio has been remapped so dispatch it */
trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone, trace_block_bio_remap(clone->bi_disk->queue, clone,
tio->io->bio->bi_bdev->bd_dev, sector); bio_dev(tio->io->bio), sector);
generic_make_request(clone); generic_make_request(clone);
break; break;
case DM_MAPIO_KILL: case DM_MAPIO_KILL:
@ -1796,7 +1796,7 @@ static struct mapped_device *alloc_dev(int minor)
goto bad; goto bad;
bio_init(&md->flush_bio, NULL, 0); bio_init(&md->flush_bio, NULL, 0);
md->flush_bio.bi_bdev = md->bdev; bio_set_dev(&md->flush_bio, md->bdev);
md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC; md->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
dm_stats_init(&md->stats); dm_stats_init(&md->stats);

View File

@ -216,12 +216,12 @@ static bool faulty_make_request(struct mddev *mddev, struct bio *bio)
if (failit) { if (failit) {
struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set); struct bio *b = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
b->bi_bdev = conf->rdev->bdev; bio_set_dev(b, conf->rdev->bdev);
b->bi_private = bio; b->bi_private = bio;
b->bi_end_io = faulty_fail; b->bi_end_io = faulty_fail;
bio = b; bio = b;
} else } else
bio->bi_bdev = conf->rdev->bdev; bio_set_dev(bio, conf->rdev->bdev);
generic_make_request(bio); generic_make_request(bio);
return true; return true;

View File

@ -275,17 +275,17 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
bio = split; bio = split;
} }
bio->bi_bdev = tmp_dev->rdev->bdev; bio_set_dev(bio, tmp_dev->rdev->bdev);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - bio->bi_iter.bi_sector = bio->bi_iter.bi_sector -
start_sector + data_offset; start_sector + data_offset;
if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { !blk_queue_discard(bio->bi_disk->queue))) {
/* Just ignore it */ /* Just ignore it */
bio_endio(bio); bio_endio(bio);
} else { } else {
if (mddev->gendisk) if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), trace_block_bio_remap(bio->bi_disk->queue,
bio, disk_devt(mddev->gendisk), bio, disk_devt(mddev->gendisk),
bio_sector); bio_sector);
mddev_check_writesame(mddev, bio); mddev_check_writesame(mddev, bio);

View File

@ -422,7 +422,7 @@ static void submit_flushes(struct work_struct *ws)
bi = bio_alloc_mddev(GFP_NOIO, 0, mddev); bi = bio_alloc_mddev(GFP_NOIO, 0, mddev);
bi->bi_end_io = md_end_flush; bi->bi_end_io = md_end_flush;
bi->bi_private = rdev; bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev; bio_set_dev(bi, rdev->bdev);
bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; bi->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
atomic_inc(&mddev->flush_pending); atomic_inc(&mddev->flush_pending);
submit_bio(bi); submit_bio(bi);
@ -772,7 +772,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; bio_set_dev(bio, rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio_add_page(bio, page, size, 0); bio_add_page(bio, page, size, 0);
bio->bi_private = rdev; bio->bi_private = rdev;
@ -803,8 +803,10 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
struct bio *bio = md_bio_alloc_sync(rdev->mddev); struct bio *bio = md_bio_alloc_sync(rdev->mddev);
int ret; int ret;
bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? if (metadata_op && rdev->meta_bdev)
rdev->meta_bdev : rdev->bdev; bio_set_dev(bio, rdev->meta_bdev);
else
bio_set_dev(bio, rdev->bdev);
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);
if (metadata_op) if (metadata_op)
bio->bi_iter.bi_sector = sector + rdev->sb_start; bio->bi_iter.bi_sector = sector + rdev->sb_start;

View File

@ -509,6 +509,11 @@ static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sect
atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
} }
static inline void md_sync_acct_bio(struct bio *bio, unsigned long nr_sectors)
{
atomic_add(nr_sectors, &bio->bi_disk->sync_io);
}
struct md_personality struct md_personality
{ {
char *name; char *name;
@ -721,14 +726,14 @@ static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio) static inline void mddev_check_writesame(struct mddev *mddev, struct bio *bio)
{ {
if (bio_op(bio) == REQ_OP_WRITE_SAME && if (bio_op(bio) == REQ_OP_WRITE_SAME &&
!bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors) !bio->bi_disk->queue->limits.max_write_same_sectors)
mddev->queue->limits.max_write_same_sectors = 0; mddev->queue->limits.max_write_same_sectors = 0;
} }
static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio) static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
{ {
if (bio_op(bio) == REQ_OP_WRITE_ZEROES && if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
!bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors) !bio->bi_disk->queue->limits.max_write_zeroes_sectors)
mddev->queue->limits.max_write_zeroes_sectors = 0; mddev->queue->limits.max_write_zeroes_sectors = 0;
} }
#endif /* _MD_MD_H */ #endif /* _MD_MD_H */

View File

@ -134,7 +134,7 @@ static bool multipath_make_request(struct mddev *mddev, struct bio * bio)
__bio_clone_fast(&mp_bh->bio, bio); __bio_clone_fast(&mp_bh->bio, bio);
mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
mp_bh->bio.bi_bdev = multipath->rdev->bdev; bio_set_dev(&mp_bh->bio, multipath->rdev->bdev);
mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT; mp_bh->bio.bi_opf |= REQ_FAILFAST_TRANSPORT;
mp_bh->bio.bi_end_io = multipath_end_request; mp_bh->bio.bi_end_io = multipath_end_request;
mp_bh->bio.bi_private = mp_bh; mp_bh->bio.bi_private = mp_bh;
@ -345,17 +345,17 @@ static void multipathd(struct md_thread *thread)
if ((mp_bh->path = multipath_map (conf))<0) { if ((mp_bh->path = multipath_map (conf))<0) {
pr_err("multipath: %s: unrecoverable IO read error for block %llu\n", pr_err("multipath: %s: unrecoverable IO read error for block %llu\n",
bdevname(bio->bi_bdev,b), bio_devname(bio, b),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
multipath_end_bh_io(mp_bh, BLK_STS_IOERR); multipath_end_bh_io(mp_bh, BLK_STS_IOERR);
} else { } else {
pr_err("multipath: %s: redirecting sector %llu to another IO path\n", pr_err("multipath: %s: redirecting sector %llu to another IO path\n",
bdevname(bio->bi_bdev,b), bio_devname(bio, b),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
*bio = *(mp_bh->master_bio); *bio = *(mp_bh->master_bio);
bio->bi_iter.bi_sector += bio->bi_iter.bi_sector +=
conf->multipaths[mp_bh->path].rdev->data_offset; conf->multipaths[mp_bh->path].rdev->data_offset;
bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; bio_set_dev(bio, conf->multipaths[mp_bh->path].rdev->bdev);
bio->bi_opf |= REQ_FAILFAST_TRANSPORT; bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
bio->bi_end_io = multipath_end_request; bio->bi_end_io = multipath_end_request;
bio->bi_private = mp_bh; bio->bi_private = mp_bh;

View File

@ -588,14 +588,13 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
zone = find_zone(mddev->private, &sector); zone = find_zone(mddev->private, &sector);
tmp_dev = map_sector(mddev, zone, sector, &sector); tmp_dev = map_sector(mddev, zone, sector, &sector);
bio->bi_bdev = tmp_dev->bdev; bio_set_dev(bio, tmp_dev->bdev);
bio->bi_iter.bi_sector = sector + zone->dev_start + bio->bi_iter.bi_sector = sector + zone->dev_start +
tmp_dev->data_offset; tmp_dev->data_offset;
if (mddev->gendisk) if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), trace_block_bio_remap(bio->bi_disk->queue, bio,
bio, disk_devt(mddev->gendisk), disk_devt(mddev->gendisk), bio_sector);
bio_sector);
mddev_check_writesame(mddev, bio); mddev_check_writesame(mddev, bio);
mddev_check_write_zeroes(mddev, bio); mddev_check_write_zeroes(mddev, bio);
generic_make_request(bio); generic_make_request(bio);

View File

@ -786,13 +786,13 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next; struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void*)bio->bi_bdev; struct md_rdev *rdev = (void *)bio->bi_disk;
bio->bi_next = NULL; bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev; bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) { if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio); bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) !blk_queue_discard(bio->bi_disk->queue)))
/* Just ignore it */ /* Just ignore it */
bio_endio(bio); bio_endio(bio);
else else
@ -1273,7 +1273,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r1_bio->sector + read_bio->bi_iter.bi_sector = r1_bio->sector +
mirror->rdev->data_offset; mirror->rdev->data_offset;
read_bio->bi_bdev = mirror->rdev->bdev; bio_set_dev(read_bio, mirror->rdev->bdev);
read_bio->bi_end_io = raid1_end_read_request; read_bio->bi_end_io = raid1_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync); bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &mirror->rdev->flags) && if (test_bit(FailFast, &mirror->rdev->flags) &&
@ -1282,9 +1282,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r1_bio; read_bio->bi_private = r1_bio;
if (mddev->gendisk) if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
read_bio, disk_devt(mddev->gendisk), disk_devt(mddev->gendisk), r1_bio->sector);
r1_bio->sector);
generic_make_request(read_bio); generic_make_request(read_bio);
} }
@ -1496,7 +1495,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
mbio->bi_iter.bi_sector = (r1_bio->sector + mbio->bi_iter.bi_sector = (r1_bio->sector +
conf->mirrors[i].rdev->data_offset); conf->mirrors[i].rdev->data_offset);
mbio->bi_bdev = conf->mirrors[i].rdev->bdev; bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
mbio->bi_end_io = raid1_end_write_request; mbio->bi_end_io = raid1_end_write_request;
mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA)); mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) && if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
@ -1508,11 +1507,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
atomic_inc(&r1_bio->remaining); atomic_inc(&r1_bio->remaining);
if (mddev->gendisk) if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), trace_block_bio_remap(mbio->bi_disk->queue,
mbio, disk_devt(mddev->gendisk), mbio, disk_devt(mddev->gendisk),
r1_bio->sector); r1_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/ /* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void*)conf->mirrors[i].rdev; mbio->bi_disk = (void *)conf->mirrors[i].rdev;
cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
if (cb) if (cb)
@ -1990,8 +1989,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
* Don't fail devices as that won't really help. * Don't fail devices as that won't really help.
*/ */
pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n", pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
mdname(mddev), mdname(mddev), bio_devname(bio, b),
bdevname(bio->bi_bdev, b),
(unsigned long long)r1_bio->sector); (unsigned long long)r1_bio->sector);
for (d = 0; d < conf->raid_disks * 2; d++) { for (d = 0; d < conf->raid_disks * 2; d++) {
rdev = conf->mirrors[d].rdev; rdev = conf->mirrors[d].rdev;
@ -2082,7 +2080,7 @@ static void process_checks(struct r1bio *r1_bio)
b->bi_status = status; b->bi_status = status;
b->bi_iter.bi_sector = r1_bio->sector + b->bi_iter.bi_sector = r1_bio->sector +
conf->mirrors[i].rdev->data_offset; conf->mirrors[i].rdev->data_offset;
b->bi_bdev = conf->mirrors[i].rdev->bdev; bio_set_dev(b, conf->mirrors[i].rdev->bdev);
b->bi_end_io = end_sync_read; b->bi_end_io = end_sync_read;
rp->raid_bio = r1_bio; rp->raid_bio = r1_bio;
b->bi_private = rp; b->bi_private = rp;
@ -2350,7 +2348,7 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
bio_trim(wbio, sector - r1_bio->sector, sectors); bio_trim(wbio, sector - r1_bio->sector, sectors);
wbio->bi_iter.bi_sector += rdev->data_offset; wbio->bi_iter.bi_sector += rdev->data_offset;
wbio->bi_bdev = rdev->bdev; bio_set_dev(wbio, rdev->bdev);
if (submit_bio_wait(wbio) < 0) if (submit_bio_wait(wbio) < 0)
/* failure! */ /* failure! */
@ -2440,7 +2438,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
struct mddev *mddev = conf->mddev; struct mddev *mddev = conf->mddev;
struct bio *bio; struct bio *bio;
struct md_rdev *rdev; struct md_rdev *rdev;
dev_t bio_dev;
sector_t bio_sector; sector_t bio_sector;
clear_bit(R1BIO_ReadError, &r1_bio->state); clear_bit(R1BIO_ReadError, &r1_bio->state);
@ -2454,7 +2451,6 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
*/ */
bio = r1_bio->bios[r1_bio->read_disk]; bio = r1_bio->bios[r1_bio->read_disk];
bio_dev = bio->bi_bdev->bd_dev;
bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector; bio_sector = conf->mirrors[r1_bio->read_disk].rdev->data_offset + r1_bio->sector;
bio_put(bio); bio_put(bio);
r1_bio->bios[r1_bio->read_disk] = NULL; r1_bio->bios[r1_bio->read_disk] = NULL;
@ -2727,7 +2723,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
if (bio->bi_end_io) { if (bio->bi_end_io) {
atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending);
bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
bio->bi_bdev = rdev->bdev; bio_set_dev(bio, rdev->bdev);
if (test_bit(FailFast, &rdev->flags)) if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST; bio->bi_opf |= MD_FAILFAST;
} }
@ -2853,7 +2849,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
bio = r1_bio->bios[i]; bio = r1_bio->bios[i];
if (bio->bi_end_io == end_sync_read) { if (bio->bi_end_io == end_sync_read) {
read_targets--; read_targets--;
md_sync_acct(bio->bi_bdev, nr_sectors); md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1) if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST; bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio); generic_make_request(bio);
@ -2862,7 +2858,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
} else { } else {
atomic_set(&r1_bio->remaining, 1); atomic_set(&r1_bio->remaining, 1);
bio = r1_bio->bios[r1_bio->read_disk]; bio = r1_bio->bios[r1_bio->read_disk];
md_sync_acct(bio->bi_bdev, nr_sectors); md_sync_acct_bio(bio, nr_sectors);
if (read_targets == 1) if (read_targets == 1)
bio->bi_opf &= ~MD_FAILFAST; bio->bi_opf &= ~MD_FAILFAST;
generic_make_request(bio); generic_make_request(bio);

View File

@ -901,13 +901,13 @@ static void flush_pending_writes(struct r10conf *conf)
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next; struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void*)bio->bi_bdev; struct md_rdev *rdev = (void*)bio->bi_disk;
bio->bi_next = NULL; bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev; bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) { if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio); bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) !blk_queue_discard(bio->bi_disk->queue)))
/* Just ignore it */ /* Just ignore it */
bio_endio(bio); bio_endio(bio);
else else
@ -1085,13 +1085,13 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
while (bio) { /* submit pending writes */ while (bio) { /* submit pending writes */
struct bio *next = bio->bi_next; struct bio *next = bio->bi_next;
struct md_rdev *rdev = (void*)bio->bi_bdev; struct md_rdev *rdev = (void*)bio->bi_disk;
bio->bi_next = NULL; bio->bi_next = NULL;
bio->bi_bdev = rdev->bdev; bio_set_dev(bio, rdev->bdev);
if (test_bit(Faulty, &rdev->flags)) { if (test_bit(Faulty, &rdev->flags)) {
bio_io_error(bio); bio_io_error(bio);
} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
!blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) !blk_queue_discard(bio->bi_disk->queue)))
/* Just ignore it */ /* Just ignore it */
bio_endio(bio); bio_endio(bio);
else else
@ -1200,7 +1200,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
choose_data_offset(r10_bio, rdev); choose_data_offset(r10_bio, rdev);
read_bio->bi_bdev = rdev->bdev; bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_end_io = raid10_end_read_request; read_bio->bi_end_io = raid10_end_read_request;
bio_set_op_attrs(read_bio, op, do_sync); bio_set_op_attrs(read_bio, op, do_sync);
if (test_bit(FailFast, &rdev->flags) && if (test_bit(FailFast, &rdev->flags) &&
@ -1209,7 +1209,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
read_bio->bi_private = r10_bio; read_bio->bi_private = r10_bio;
if (mddev->gendisk) if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev), trace_block_bio_remap(read_bio->bi_disk->queue,
read_bio, disk_devt(mddev->gendisk), read_bio, disk_devt(mddev->gendisk),
r10_bio->sector); r10_bio->sector);
generic_make_request(read_bio); generic_make_request(read_bio);
@ -1249,7 +1249,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr + mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
choose_data_offset(r10_bio, rdev)); choose_data_offset(r10_bio, rdev));
mbio->bi_bdev = rdev->bdev; bio_set_dev(mbio, rdev->bdev);
mbio->bi_end_io = raid10_end_write_request; mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua); bio_set_op_attrs(mbio, op, do_sync | do_fua);
if (!replacement && test_bit(FailFast, if (!replacement && test_bit(FailFast,
@ -1259,11 +1259,11 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
mbio->bi_private = r10_bio; mbio->bi_private = r10_bio;
if (conf->mddev->gendisk) if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(mbio->bi_bdev), trace_block_bio_remap(mbio->bi_disk->queue,
mbio, disk_devt(conf->mddev->gendisk), mbio, disk_devt(conf->mddev->gendisk),
r10_bio->sector); r10_bio->sector);
/* flush_pending_writes() needs access to the rdev so...*/ /* flush_pending_writes() needs access to the rdev so...*/
mbio->bi_bdev = (void *)rdev; mbio->bi_disk = (void *)rdev;
atomic_inc(&r10_bio->remaining); atomic_inc(&r10_bio->remaining);
@ -2094,7 +2094,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
if (test_bit(FailFast, &conf->mirrors[d].rdev->flags)) if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
tbio->bi_opf |= MD_FAILFAST; tbio->bi_opf |= MD_FAILFAST;
tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
tbio->bi_bdev = conf->mirrors[d].rdev->bdev; bio_set_dev(tbio, conf->mirrors[d].rdev->bdev);
generic_make_request(tbio); generic_make_request(tbio);
} }
@ -2552,7 +2552,7 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector); wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
wbio->bi_iter.bi_sector = wsector + wbio->bi_iter.bi_sector = wsector +
choose_data_offset(r10_bio, rdev); choose_data_offset(r10_bio, rdev);
wbio->bi_bdev = rdev->bdev; bio_set_dev(wbio, rdev->bdev);
bio_set_op_attrs(wbio, REQ_OP_WRITE, 0); bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
if (submit_bio_wait(wbio) < 0) if (submit_bio_wait(wbio) < 0)
@ -2575,7 +2575,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
struct bio *bio; struct bio *bio;
struct r10conf *conf = mddev->private; struct r10conf *conf = mddev->private;
struct md_rdev *rdev = r10_bio->devs[slot].rdev; struct md_rdev *rdev = r10_bio->devs[slot].rdev;
dev_t bio_dev;
sector_t bio_last_sector; sector_t bio_last_sector;
/* we got a read error. Maybe the drive is bad. Maybe just /* we got a read error. Maybe the drive is bad. Maybe just
@ -2587,7 +2586,6 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
* frozen. * frozen.
*/ */
bio = r10_bio->devs[slot].bio; bio = r10_bio->devs[slot].bio;
bio_dev = bio->bi_bdev->bd_dev;
bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors; bio_last_sector = r10_bio->devs[slot].addr + rdev->data_offset + r10_bio->sectors;
bio_put(bio); bio_put(bio);
r10_bio->devs[slot].bio = NULL; r10_bio->devs[slot].bio = NULL;
@ -2950,7 +2948,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
/* Again, very different code for resync and recovery. /* Again, very different code for resync and recovery.
* Both must result in an r10bio with a list of bios that * Both must result in an r10bio with a list of bios that
* have bi_end_io, bi_sector, bi_bdev set, * have bi_end_io, bi_sector, bi_disk set,
* and bi_private set to the r10bio. * and bi_private set to the r10bio.
* For recovery, we may actually create several r10bios * For recovery, we may actually create several r10bios
* with 2 bios in each, that correspond to the bios in the main one. * with 2 bios in each, that correspond to the bios in the main one.
@ -3095,7 +3093,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
from_addr = r10_bio->devs[j].addr; from_addr = r10_bio->devs[j].addr;
bio->bi_iter.bi_sector = from_addr + bio->bi_iter.bi_sector = from_addr +
rdev->data_offset; rdev->data_offset;
bio->bi_bdev = rdev->bdev; bio_set_dev(bio, rdev->bdev);
atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending);
/* and we write to 'i' (if not in_sync) */ /* and we write to 'i' (if not in_sync) */
@ -3117,7 +3115,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr bio->bi_iter.bi_sector = to_addr
+ mrdev->data_offset; + mrdev->data_offset;
bio->bi_bdev = mrdev->bdev; bio_set_dev(bio, mrdev->bdev);
atomic_inc(&r10_bio->remaining); atomic_inc(&r10_bio->remaining);
} else } else
r10_bio->devs[1].bio->bi_end_io = NULL; r10_bio->devs[1].bio->bi_end_io = NULL;
@ -3143,7 +3141,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_iter.bi_sector = to_addr + bio->bi_iter.bi_sector = to_addr +
mreplace->data_offset; mreplace->data_offset;
bio->bi_bdev = mreplace->bdev; bio_set_dev(bio, mreplace->bdev);
atomic_inc(&r10_bio->remaining); atomic_inc(&r10_bio->remaining);
break; break;
} }
@ -3289,7 +3287,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (test_bit(FailFast, &rdev->flags)) if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST; bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset; bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev; bio_set_dev(bio, rdev->bdev);
count++; count++;
rdev = rcu_dereference(conf->mirrors[d].replacement); rdev = rcu_dereference(conf->mirrors[d].replacement);
@ -3311,7 +3309,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
if (test_bit(FailFast, &rdev->flags)) if (test_bit(FailFast, &rdev->flags))
bio->bi_opf |= MD_FAILFAST; bio->bi_opf |= MD_FAILFAST;
bio->bi_iter.bi_sector = sector + rdev->data_offset; bio->bi_iter.bi_sector = sector + rdev->data_offset;
bio->bi_bdev = rdev->bdev; bio_set_dev(bio, rdev->bdev);
count++; count++;
rcu_read_unlock(); rcu_read_unlock();
} }
@ -3367,7 +3365,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sectors = nr_sectors; r10_bio->sectors = nr_sectors;
if (bio->bi_end_io == end_sync_read) { if (bio->bi_end_io == end_sync_read) {
md_sync_acct(bio->bi_bdev, nr_sectors); md_sync_acct_bio(bio, nr_sectors);
bio->bi_status = 0; bio->bi_status = 0;
generic_make_request(bio); generic_make_request(bio);
} }
@ -4383,7 +4381,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
read_bio->bi_bdev = rdev->bdev; bio_set_dev(read_bio, rdev->bdev);
read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+ rdev->data_offset); + rdev->data_offset);
read_bio->bi_private = r10_bio; read_bio->bi_private = r10_bio;
@ -4417,7 +4415,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
if (!rdev2 || test_bit(Faulty, &rdev2->flags)) if (!rdev2 || test_bit(Faulty, &rdev2->flags))
continue; continue;
b->bi_bdev = rdev2->bdev; bio_set_dev(b, rdev2->bdev);
b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
rdev2->new_data_offset; rdev2->new_data_offset;
b->bi_end_io = end_reshape_write; b->bi_end_io = end_reshape_write;
@ -4449,7 +4447,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
r10_bio->sectors = nr_sectors; r10_bio->sectors = nr_sectors;
/* Now submit the read */ /* Now submit the read */
md_sync_acct(read_bio->bi_bdev, r10_bio->sectors); md_sync_acct_bio(read_bio, r10_bio->sectors);
atomic_inc(&r10_bio->remaining); atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL; read_bio->bi_next = NULL;
generic_make_request(read_bio); generic_make_request(read_bio);
@ -4511,7 +4509,7 @@ static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
} }
atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending);
rcu_read_unlock(); rcu_read_unlock();
md_sync_acct(b->bi_bdev, r10_bio->sectors); md_sync_acct_bio(b, r10_bio->sectors);
atomic_inc(&r10_bio->remaining); atomic_inc(&r10_bio->remaining);
b->bi_next = NULL; b->bi_next = NULL;
generic_make_request(b); generic_make_request(b);

View File

@ -728,7 +728,7 @@ static struct bio *r5l_bio_alloc(struct r5l_log *log)
struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs); struct bio *bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, log->bs);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
bio->bi_bdev = log->rdev->bdev; bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start; bio->bi_iter.bi_sector = log->rdev->data_offset + log->log_start;
return bio; return bio;
@ -1291,7 +1291,7 @@ void r5l_flush_stripe_to_raid(struct r5l_log *log)
if (!do_flush) if (!do_flush)
return; return;
bio_reset(&log->flush_bio); bio_reset(&log->flush_bio);
log->flush_bio.bi_bdev = log->rdev->bdev; bio_set_dev(&log->flush_bio, log->rdev->bdev);
log->flush_bio.bi_end_io = r5l_log_flush_endio; log->flush_bio.bi_end_io = r5l_log_flush_endio;
log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; log->flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
submit_bio(&log->flush_bio); submit_bio(&log->flush_bio);
@ -1669,7 +1669,7 @@ static int r5l_recovery_fetch_ra_pool(struct r5l_log *log,
sector_t offset) sector_t offset)
{ {
bio_reset(ctx->ra_bio); bio_reset(ctx->ra_bio);
ctx->ra_bio->bi_bdev = log->rdev->bdev; bio_set_dev(ctx->ra_bio, log->rdev->bdev);
bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0); bio_set_op_attrs(ctx->ra_bio, REQ_OP_READ, 0);
ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset; ctx->ra_bio->bi_iter.bi_sector = log->rdev->data_offset + offset;

View File

@ -415,7 +415,7 @@ static void ppl_submit_iounit_bio(struct ppl_io_unit *io, struct bio *bio)
pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n", pr_debug("%s: seq: %llu size: %u sector: %llu dev: %s\n",
__func__, io->seq, bio->bi_iter.bi_size, __func__, io->seq, bio->bi_iter.bi_size,
(unsigned long long)bio->bi_iter.bi_sector, (unsigned long long)bio->bi_iter.bi_sector,
bdevname(bio->bi_bdev, b)); bio_devname(bio, b));
submit_bio(bio); submit_bio(bio);
} }
@ -453,7 +453,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio->bi_end_io = ppl_log_endio; bio->bi_end_io = ppl_log_endio;
bio->bi_opf = REQ_OP_WRITE | REQ_FUA; bio->bi_opf = REQ_OP_WRITE | REQ_FUA;
bio->bi_bdev = log->rdev->bdev; bio_set_dev(bio, log->rdev->bdev);
bio->bi_iter.bi_sector = log->rdev->ppl.sector; bio->bi_iter.bi_sector = log->rdev->ppl.sector;
bio_add_page(bio, io->header_page, PAGE_SIZE, 0); bio_add_page(bio, io->header_page, PAGE_SIZE, 0);
@ -468,7 +468,7 @@ static void ppl_submit_iounit(struct ppl_io_unit *io)
bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES, bio = bio_alloc_bioset(GFP_NOIO, BIO_MAX_PAGES,
ppl_conf->bs); ppl_conf->bs);
bio->bi_opf = prev->bi_opf; bio->bi_opf = prev->bi_opf;
bio->bi_bdev = prev->bi_bdev; bio_copy_dev(bio, prev);
bio->bi_iter.bi_sector = bio_end_sector(prev); bio->bi_iter.bi_sector = bio_end_sector(prev);
bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0); bio_add_page(bio, sh->ppl_page, PAGE_SIZE, 0);

View File

@ -1096,7 +1096,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
set_bit(STRIPE_IO_STARTED, &sh->state); set_bit(STRIPE_IO_STARTED, &sh->state);
bi->bi_bdev = rdev->bdev; bio_set_dev(bi, rdev->bdev);
bio_set_op_attrs(bi, op, op_flags); bio_set_op_attrs(bi, op, op_flags);
bi->bi_end_io = op_is_write(op) bi->bi_end_io = op_is_write(op)
? raid5_end_write_request ? raid5_end_write_request
@ -1145,7 +1145,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
if (conf->mddev->gendisk) if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), trace_block_bio_remap(bi->bi_disk->queue,
bi, disk_devt(conf->mddev->gendisk), bi, disk_devt(conf->mddev->gendisk),
sh->dev[i].sector); sh->dev[i].sector);
if (should_defer && op_is_write(op)) if (should_defer && op_is_write(op))
@ -1160,7 +1160,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
set_bit(STRIPE_IO_STARTED, &sh->state); set_bit(STRIPE_IO_STARTED, &sh->state);
rbi->bi_bdev = rrdev->bdev; bio_set_dev(rbi, rrdev->bdev);
bio_set_op_attrs(rbi, op, op_flags); bio_set_op_attrs(rbi, op, op_flags);
BUG_ON(!op_is_write(op)); BUG_ON(!op_is_write(op));
rbi->bi_end_io = raid5_end_write_request; rbi->bi_end_io = raid5_end_write_request;
@ -1193,7 +1193,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
if (op == REQ_OP_DISCARD) if (op == REQ_OP_DISCARD)
rbi->bi_vcnt = 0; rbi->bi_vcnt = 0;
if (conf->mddev->gendisk) if (conf->mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), trace_block_bio_remap(rbi->bi_disk->queue,
rbi, disk_devt(conf->mddev->gendisk), rbi, disk_devt(conf->mddev->gendisk),
sh->dev[i].sector); sh->dev[i].sector);
if (should_defer && op_is_write(op)) if (should_defer && op_is_write(op))
@ -5233,7 +5233,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
atomic_inc(&rdev->nr_pending); atomic_inc(&rdev->nr_pending);
rcu_read_unlock(); rcu_read_unlock();
raid_bio->bi_next = (void*)rdev; raid_bio->bi_next = (void*)rdev;
align_bi->bi_bdev = rdev->bdev; bio_set_dev(align_bi, rdev->bdev);
bio_clear_flag(align_bi, BIO_SEG_VALID); bio_clear_flag(align_bi, BIO_SEG_VALID);
if (is_badblock(rdev, align_bi->bi_iter.bi_sector, if (is_badblock(rdev, align_bi->bi_iter.bi_sector,
@ -5255,7 +5255,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
spin_unlock_irq(&conf->device_lock); spin_unlock_irq(&conf->device_lock);
if (mddev->gendisk) if (mddev->gendisk)
trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), trace_block_bio_remap(align_bi->bi_disk->queue,
align_bi, disk_devt(mddev->gendisk), align_bi, disk_devt(mddev->gendisk),
raid_bio->bi_iter.bi_sector); raid_bio->bi_iter.bi_sector);
generic_make_request(align_bi); generic_make_request(align_bi);

View File

@ -390,7 +390,7 @@ int nd_region_activate(struct nd_region *nd_region);
void __nd_iostat_start(struct bio *bio, unsigned long *start); void __nd_iostat_start(struct bio *bio, unsigned long *start);
static inline bool nd_iostat_start(struct bio *bio, unsigned long *start) static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
{ {
struct gendisk *disk = bio->bi_bdev->bd_disk; struct gendisk *disk = bio->bi_disk;
if (!blk_queue_io_stat(disk->queue)) if (!blk_queue_io_stat(disk->queue))
return false; return false;
@ -402,7 +402,7 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
} }
static inline void nd_iostat_end(struct bio *bio, unsigned long start) static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{ {
struct gendisk *disk = bio->bi_bdev->bd_disk; struct gendisk *disk = bio->bi_disk;
generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0, generic_end_io_acct(disk->queue, bio_data_dir(bio), &disk->part0,
start); start);

View File

@ -613,11 +613,7 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
if (!disk) if (!disk)
goto submit; goto submit;
bio->bi_bdev = bdget_disk(disk, 0); bio->bi_disk = disk;
if (!bio->bi_bdev) {
ret = -ENODEV;
goto out_unmap;
}
if (meta_buffer && meta_len) { if (meta_buffer && meta_len) {
struct bio_integrity_payload *bip; struct bio_integrity_payload *bip;
@ -668,11 +664,8 @@ int __nvme_submit_user_cmd(struct request_queue *q, struct nvme_command *cmd,
out_free_meta: out_free_meta:
kfree(meta); kfree(meta);
out_unmap: out_unmap:
if (bio) { if (bio)
if (disk && bio->bi_bdev)
bdput(bio->bi_bdev);
blk_rq_unmap_user(bio); blk_rq_unmap_user(bio);
}
out: out:
blk_mq_free_request(req); blk_mq_free_request(req);
return ret; return ret;

View File

@ -643,17 +643,9 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma); vcmd->ph_rw.metadata = cpu_to_le64(metadata_dma);
} }
if (!disk) bio->bi_disk = disk;
goto submit;
bio->bi_bdev = bdget_disk(disk, 0);
if (!bio->bi_bdev) {
ret = -ENODEV;
goto err_meta;
}
} }
submit:
blk_execute_rq(q, NULL, rq, 0); blk_execute_rq(q, NULL, rq, 0);
if (nvme_req(rq)->flags & NVME_REQ_CANCELLED) if (nvme_req(rq)->flags & NVME_REQ_CANCELLED)
@ -673,11 +665,8 @@ static int nvme_nvm_submit_user_cmd(struct request_queue *q,
if (meta_buf && meta_len) if (meta_buf && meta_len)
dma_pool_free(dev->dma_pool, metadata, metadata_dma); dma_pool_free(dev->dma_pool, metadata, metadata_dma);
err_map: err_map:
if (bio) { if (bio)
if (disk && bio->bi_bdev)
bdput(bio->bi_bdev);
blk_rq_unmap_user(bio); blk_rq_unmap_user(bio);
}
err_ppa: err_ppa:
if (ppa_buf && ppa_len) if (ppa_buf && ppa_len)
dma_pool_free(dev->dma_pool, ppa_list, ppa_dma); dma_pool_free(dev->dma_pool, ppa_list, ppa_dma);

View File

@ -68,7 +68,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
nvmet_inline_bio_init(req); nvmet_inline_bio_init(req);
bio = &req->inline_bio; bio = &req->inline_bio;
bio->bi_bdev = req->ns->bdev; bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_private = req; bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done; bio->bi_end_io = nvmet_bio_done;
@ -80,7 +80,7 @@ static void nvmet_execute_rw(struct nvmet_req *req)
struct bio *prev = bio; struct bio *prev = bio;
bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES)); bio = bio_alloc(GFP_KERNEL, min(sg_cnt, BIO_MAX_PAGES));
bio->bi_bdev = req->ns->bdev; bio_set_dev(bio, req->ns->bdev);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);
@ -104,7 +104,7 @@ static void nvmet_execute_flush(struct nvmet_req *req)
nvmet_inline_bio_init(req); nvmet_inline_bio_init(req);
bio = &req->inline_bio; bio = &req->inline_bio;
bio->bi_bdev = req->ns->bdev; bio_set_dev(bio, req->ns->bdev);
bio->bi_private = req; bio->bi_private = req;
bio->bi_end_io = nvmet_bio_done; bio->bi_end_io = nvmet_bio_done;
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

View File

@ -856,14 +856,14 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
blk_queue_split(q, &bio); blk_queue_split(q, &bio);
bytes_done = 0; bytes_done = 0;
dev_info = bio->bi_bdev->bd_disk->private_data; dev_info = bio->bi_disk->private_data;
if (dev_info == NULL) if (dev_info == NULL)
goto fail; goto fail;
if ((bio->bi_iter.bi_sector & 7) != 0 || if ((bio->bi_iter.bi_sector & 7) != 0 ||
(bio->bi_iter.bi_size & 4095) != 0) (bio->bi_iter.bi_size & 4095) != 0)
/* Request is not page-aligned. */ /* Request is not page-aligned. */
goto fail; goto fail;
if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { if (bio_end_sector(bio) > get_capacity(bio->bi_disk)) {
/* Request beyond end of DCSS segment. */ /* Request beyond end of DCSS segment. */
goto fail; goto fail;
} }

View File

@ -183,7 +183,7 @@ static unsigned long xpram_highest_page_index(void)
*/ */
static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio) static blk_qc_t xpram_make_request(struct request_queue *q, struct bio *bio)
{ {
xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; xpram_device_t *xdev = bio->bi_disk->private_data;
struct bio_vec bvec; struct bio_vec bvec;
struct bvec_iter iter; struct bvec_iter iter;
unsigned int index; unsigned int index;

View File

@ -338,7 +338,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num, int op,
return NULL; return NULL;
} }
bio->bi_bdev = ib_dev->ibd_bd; bio_set_dev(bio, ib_dev->ibd_bd);
bio->bi_private = cmd; bio->bi_private = cmd;
bio->bi_end_io = &iblock_bio_done; bio->bi_end_io = &iblock_bio_done;
bio->bi_iter.bi_sector = lba; bio->bi_iter.bi_sector = lba;
@ -395,7 +395,7 @@ iblock_execute_sync_cache(struct se_cmd *cmd)
bio = bio_alloc(GFP_KERNEL, 0); bio = bio_alloc(GFP_KERNEL, 0);
bio->bi_end_io = iblock_end_io_flush; bio->bi_end_io = iblock_end_io_flush;
bio->bi_bdev = ib_dev->ibd_bd; bio_set_dev(bio, ib_dev->ibd_bd);
bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH; bio->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
if (!immed) if (!immed)
bio->bi_private = cmd; bio->bi_private = cmd;

View File

@ -223,7 +223,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
} }
bio_init(&bio, vecs, nr_pages); bio_init(&bio, vecs, nr_pages);
bio.bi_bdev = bdev; bio_set_dev(&bio, bdev);
bio.bi_iter.bi_sector = pos >> 9; bio.bi_iter.bi_sector = pos >> 9;
bio.bi_write_hint = iocb->ki_hint; bio.bi_write_hint = iocb->ki_hint;
bio.bi_private = current; bio.bi_private = current;
@ -362,7 +362,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
blk_start_plug(&plug); blk_start_plug(&plug);
for (;;) { for (;;) {
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = pos >> 9; bio->bi_iter.bi_sector = pos >> 9;
bio->bi_write_hint = iocb->ki_hint; bio->bi_write_hint = iocb->ki_hint;
bio->bi_private = dio; bio->bi_private = dio;

View File

@ -1635,7 +1635,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
unsigned int j; unsigned int j;
bio = btrfs_io_bio_alloc(num_pages - i); bio = btrfs_io_bio_alloc(num_pages - i);
bio->bi_bdev = block_ctx->dev->bdev; bio_set_dev(bio, block_ctx->dev->bdev);
bio->bi_iter.bi_sector = dev_bytenr >> 9; bio->bi_iter.bi_sector = dev_bytenr >> 9;
bio_set_op_attrs(bio, REQ_OP_READ, 0); bio_set_op_attrs(bio, REQ_OP_READ, 0);
@ -2803,7 +2803,7 @@ static void __btrfsic_submit_bio(struct bio *bio)
mutex_lock(&btrfsic_mutex); mutex_lock(&btrfsic_mutex);
/* since btrfsic_submit_bio() is also called before /* since btrfsic_submit_bio() is also called before
* btrfsic_mount(), this might return NULL */ * btrfsic_mount(), this might return NULL */
dev_state = btrfsic_dev_state_lookup(bio->bi_bdev->bd_dev); dev_state = btrfsic_dev_state_lookup(bio_dev(bio));
if (NULL != dev_state && if (NULL != dev_state &&
(bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) { (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
unsigned int i = 0; unsigned int i = 0;
@ -2819,10 +2819,10 @@ static void __btrfsic_submit_bio(struct bio *bio)
bio_is_patched = 0; bio_is_patched = 0;
if (dev_state->state->print_mask & if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", pr_info("submit_bio(rw=%d,0x%x, bi_vcnt=%u, bi_sector=%llu (bytenr %llu), bi_disk=%p)\n",
bio_op(bio), bio->bi_opf, segs, bio_op(bio), bio->bi_opf, segs,
(unsigned long long)bio->bi_iter.bi_sector, (unsigned long long)bio->bi_iter.bi_sector,
dev_bytenr, bio->bi_bdev); dev_bytenr, bio->bi_disk);
mapped_datav = kmalloc_array(segs, mapped_datav = kmalloc_array(segs,
sizeof(*mapped_datav), GFP_NOFS); sizeof(*mapped_datav), GFP_NOFS);
@ -2851,8 +2851,8 @@ static void __btrfsic_submit_bio(struct bio *bio)
} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) { } else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
if (dev_state->state->print_mask & if (dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
pr_info("submit_bio(rw=%d,0x%x FLUSH, bdev=%p)\n", pr_info("submit_bio(rw=%d,0x%x FLUSH, disk=%p)\n",
bio_op(bio), bio->bi_opf, bio->bi_bdev); bio_op(bio), bio->bi_opf, bio->bi_disk);
if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
if ((dev_state->state->print_mask & if ((dev_state->state->print_mask &
(BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |

View File

@ -3499,7 +3499,7 @@ static void write_dev_flush(struct btrfs_device *device)
bio_reset(bio); bio_reset(bio);
bio->bi_end_io = btrfs_end_empty_barrier; bio->bi_end_io = btrfs_end_empty_barrier;
bio->bi_bdev = device->bdev; bio_set_dev(bio, device->bdev);
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
init_completion(&device->flush_wait); init_completion(&device->flush_wait);
bio->bi_private = &device->flush_wait; bio->bi_private = &device->flush_wait;

View File

@ -2033,7 +2033,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 ino, u64 start,
bio_put(bio); bio_put(bio);
return -EIO; return -EIO;
} }
bio->bi_bdev = dev->bdev; bio_set_dev(bio, dev->bdev);
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
bio_add_page(bio, page, length, pg_offset); bio_add_page(bio, page, length, pg_offset);
@ -2335,7 +2335,7 @@ struct bio *btrfs_create_repair_bio(struct inode *inode, struct bio *failed_bio,
bio = btrfs_io_bio_alloc(1); bio = btrfs_io_bio_alloc(1);
bio->bi_end_io = endio_func; bio->bi_end_io = endio_func;
bio->bi_iter.bi_sector = failrec->logical >> 9; bio->bi_iter.bi_sector = failrec->logical >> 9;
bio->bi_bdev = fs_info->fs_devices->latest_bdev; bio_set_dev(bio, fs_info->fs_devices->latest_bdev);
bio->bi_iter.bi_size = 0; bio->bi_iter.bi_size = 0;
bio->bi_private = data; bio->bi_private = data;
@ -2675,7 +2675,7 @@ struct bio *btrfs_bio_alloc(struct block_device *bdev, u64 first_byte)
struct bio *bio; struct bio *bio;
bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset); bio = bio_alloc_bioset(GFP_NOFS, BIO_MAX_PAGES, btrfs_bioset);
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = first_byte >> 9; bio->bi_iter.bi_sector = first_byte >> 9;
btrfs_io_bio_init(btrfs_io_bio(bio)); btrfs_io_bio_init(btrfs_io_bio(bio));
return bio; return bio;

View File

@ -1090,7 +1090,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
*/ */
if (last_end == disk_start && stripe->dev->bdev && if (last_end == disk_start && stripe->dev->bdev &&
!last->bi_status && !last->bi_status &&
last->bi_bdev == stripe->dev->bdev) { last->bi_disk == stripe->dev->bdev->bd_disk &&
last->bi_partno == stripe->dev->bdev->bd_partno) {
ret = bio_add_page(last, page, PAGE_SIZE, 0); ret = bio_add_page(last, page, PAGE_SIZE, 0);
if (ret == PAGE_SIZE) if (ret == PAGE_SIZE)
return 0; return 0;
@ -1100,7 +1101,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
/* put a new bio on the list */ /* put a new bio on the list */
bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1); bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
bio->bi_iter.bi_size = 0; bio->bi_iter.bi_size = 0;
bio->bi_bdev = stripe->dev->bdev; bio_set_dev(bio, stripe->dev->bdev);
bio->bi_iter.bi_sector = disk_start >> 9; bio->bi_iter.bi_sector = disk_start >> 9;
bio_add_page(bio, page, PAGE_SIZE, 0); bio_add_page(bio, page, PAGE_SIZE, 0);
@ -1347,7 +1348,8 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
stripe_start = stripe->physical; stripe_start = stripe->physical;
if (physical >= stripe_start && if (physical >= stripe_start &&
physical < stripe_start + rbio->stripe_len && physical < stripe_start + rbio->stripe_len &&
bio->bi_bdev == stripe->dev->bdev) { bio->bi_disk == stripe->dev->bdev->bd_disk &&
bio->bi_partno == stripe->dev->bdev->bd_partno) {
return i; return i;
} }
} }

View File

@ -1738,7 +1738,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
WARN_ON(!page->page); WARN_ON(!page->page);
bio = btrfs_io_bio_alloc(1); bio = btrfs_io_bio_alloc(1);
bio->bi_bdev = page->dev->bdev; bio_set_dev(bio, page->dev->bdev);
bio_add_page(bio, page->page, PAGE_SIZE, 0); bio_add_page(bio, page->page, PAGE_SIZE, 0);
if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) { if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
@ -1826,7 +1826,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
} }
bio = btrfs_io_bio_alloc(1); bio = btrfs_io_bio_alloc(1);
bio->bi_bdev = page_bad->dev->bdev; bio_set_dev(bio, page_bad->dev->bdev);
bio->bi_iter.bi_sector = page_bad->physical >> 9; bio->bi_iter.bi_sector = page_bad->physical >> 9;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
@ -1921,7 +1921,7 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
bio->bi_private = sbio; bio->bi_private = sbio;
bio->bi_end_io = scrub_wr_bio_end_io; bio->bi_end_io = scrub_wr_bio_end_io;
bio->bi_bdev = sbio->dev->bdev; bio_set_dev(bio, sbio->dev->bdev);
bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_iter.bi_sector = sbio->physical >> 9;
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
sbio->status = 0; sbio->status = 0;
@ -1964,7 +1964,7 @@ static void scrub_wr_submit(struct scrub_ctx *sctx)
sbio = sctx->wr_curr_bio; sbio = sctx->wr_curr_bio;
sctx->wr_curr_bio = NULL; sctx->wr_curr_bio = NULL;
WARN_ON(!sbio->bio->bi_bdev); WARN_ON(!sbio->bio->bi_disk);
scrub_pending_bio_inc(sctx); scrub_pending_bio_inc(sctx);
/* process all writes in a single worker thread. Then the block layer /* process all writes in a single worker thread. Then the block layer
* orders the requests before sending them to the driver which * orders the requests before sending them to the driver which
@ -2321,7 +2321,7 @@ static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
bio->bi_private = sbio; bio->bi_private = sbio;
bio->bi_end_io = scrub_bio_end_io; bio->bi_end_io = scrub_bio_end_io;
bio->bi_bdev = sbio->dev->bdev; bio_set_dev(bio, sbio->dev->bdev);
bio->bi_iter.bi_sector = sbio->physical >> 9; bio->bi_iter.bi_sector = sbio->physical >> 9;
bio_set_op_attrs(bio, REQ_OP_READ, 0); bio_set_op_attrs(bio, REQ_OP_READ, 0);
sbio->status = 0; sbio->status = 0;
@ -4627,7 +4627,7 @@ static int write_page_nocow(struct scrub_ctx *sctx,
bio = btrfs_io_bio_alloc(1); bio = btrfs_io_bio_alloc(1);
bio->bi_iter.bi_size = 0; bio->bi_iter.bi_size = 0;
bio->bi_iter.bi_sector = physical_for_dev_replace >> 9; bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
bio->bi_bdev = dev->bdev; bio_set_dev(bio, dev->bdev);
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC; bio->bi_opf = REQ_OP_WRITE | REQ_SYNC;
ret = bio_add_page(bio, page, PAGE_SIZE, 0); ret = bio_add_page(bio, page, PAGE_SIZE, 0);
if (ret != PAGE_SIZE) { if (ret != PAGE_SIZE) {

View File

@ -6188,7 +6188,7 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
rcu_read_unlock(); rcu_read_unlock();
} }
#endif #endif
bio->bi_bdev = dev->bdev; bio_set_dev(bio, dev->bdev);
btrfs_bio_counter_inc_noblocked(fs_info); btrfs_bio_counter_inc_noblocked(fs_info);

View File

@ -3057,7 +3057,7 @@ void guard_bio_eod(int op, struct bio *bio)
struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1]; struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt - 1];
unsigned truncated_bytes; unsigned truncated_bytes;
maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; maxsector = get_capacity(bio->bi_disk);
if (!maxsector) if (!maxsector)
return; return;
@ -3116,7 +3116,7 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
} }
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev; bio_set_dev(bio, bh->b_bdev);
bio->bi_write_hint = write_hint; bio->bi_write_hint = write_hint;
bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh)); bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh));

View File

@ -115,7 +115,7 @@ int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk,
err = -ENOMEM; err = -ENOMEM;
goto errout; goto errout;
} }
bio->bi_bdev = inode->i_sb->s_bdev; bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
pblk << (inode->i_sb->s_blocksize_bits - 9); pblk << (inode->i_sb->s_blocksize_bits - 9);
bio_set_op_attrs(bio, REQ_OP_WRITE, 0); bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

View File

@ -111,7 +111,7 @@ struct dio {
int op; int op;
int op_flags; int op_flags;
blk_qc_t bio_cookie; blk_qc_t bio_cookie;
struct block_device *bio_bdev; struct gendisk *bio_disk;
struct inode *inode; struct inode *inode;
loff_t i_size; /* i_size when submitted */ loff_t i_size; /* i_size when submitted */
dio_iodone_t *end_io; /* IO completion function */ dio_iodone_t *end_io; /* IO completion function */
@ -377,7 +377,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
*/ */
bio = bio_alloc(GFP_KERNEL, nr_vecs); bio = bio_alloc(GFP_KERNEL, nr_vecs);
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = first_sector; bio->bi_iter.bi_sector = first_sector;
bio_set_op_attrs(bio, dio->op, dio->op_flags); bio_set_op_attrs(bio, dio->op, dio->op_flags);
if (dio->is_async) if (dio->is_async)
@ -412,7 +412,7 @@ static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty) if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio); bio_set_pages_dirty(bio);
dio->bio_bdev = bio->bi_bdev; dio->bio_disk = bio->bi_disk;
if (sdio->submit_io) { if (sdio->submit_io) {
sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio); sdio->submit_io(bio, dio->inode, sdio->logical_offset_in_bio);
@ -458,7 +458,7 @@ static struct bio *dio_await_one(struct dio *dio)
dio->waiter = current; dio->waiter = current;
spin_unlock_irqrestore(&dio->bio_lock, flags); spin_unlock_irqrestore(&dio->bio_lock, flags);
if (!(dio->iocb->ki_flags & IOCB_HIPRI) || if (!(dio->iocb->ki_flags & IOCB_HIPRI) ||
!blk_mq_poll(bdev_get_queue(dio->bio_bdev), dio->bio_cookie)) !blk_mq_poll(dio->bio_disk->queue, dio->bio_cookie))
io_schedule(); io_schedule();
/* wake up sets us TASK_RUNNING */ /* wake up sets us TASK_RUNNING */
spin_lock_irqsave(&dio->bio_lock, flags); spin_lock_irqsave(&dio->bio_lock, flags);

View File

@ -869,7 +869,7 @@ static int _write_mirror(struct ore_io_state *ios, int cur_comp)
goto out; goto out;
} }
bio->bi_bdev = NULL; bio->bi_disk = NULL;
bio->bi_next = NULL; bio->bi_next = NULL;
per_dev->offset = master_dev->offset; per_dev->offset = master_dev->offset;
per_dev->length = master_dev->length; per_dev->length = master_dev->length;

View File

@ -300,7 +300,7 @@ static void ext4_end_bio(struct bio *bio)
char b[BDEVNAME_SIZE]; char b[BDEVNAME_SIZE];
if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n", if (WARN_ONCE(!io_end, "io_end is NULL: %s: sector %Lu len %u err %d\n",
bdevname(bio->bi_bdev, b), bio_devname(bio, b),
(long long) bio->bi_iter.bi_sector, (long long) bio->bi_iter.bi_sector,
(unsigned) bio_sectors(bio), (unsigned) bio_sectors(bio),
bio->bi_status)) { bio->bi_status)) {
@ -375,7 +375,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
return -ENOMEM; return -ENOMEM;
wbc_init_bio(io->io_wbc, bio); wbc_init_bio(io->io_wbc, bio);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev; bio_set_dev(bio, bh->b_bdev);
bio->bi_end_io = ext4_end_bio; bio->bi_end_io = ext4_end_bio;
bio->bi_private = ext4_get_io_end(io->io_end); bio->bi_private = ext4_get_io_end(io->io_end);
io->io_bio = bio; io->io_bio = bio;

View File

@ -254,7 +254,7 @@ int ext4_mpage_readpages(struct address_space *mapping,
fscrypt_release_ctx(ctx); fscrypt_release_ctx(ctx);
goto set_error_page; goto set_error_page;
} }
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9); bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io; bio->bi_end_io = mpage_end_io;
bio->bi_private = ctx; bio->bi_private = ctx;

View File

@ -142,7 +142,7 @@ struct block_device *f2fs_target_device(struct f2fs_sb_info *sbi,
} }
} }
if (bio) { if (bio) {
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
} }
return bdev; return bdev;
@ -161,7 +161,8 @@ int f2fs_target_device_index(struct f2fs_sb_info *sbi, block_t blkaddr)
static bool __same_bdev(struct f2fs_sb_info *sbi, static bool __same_bdev(struct f2fs_sb_info *sbi,
block_t blk_addr, struct bio *bio) block_t blk_addr, struct bio *bio)
{ {
return f2fs_target_device(sbi, blk_addr, NULL) == bio->bi_bdev; struct block_device *b = f2fs_target_device(sbi, blk_addr, NULL);
return bio->bi_disk == b->bd_disk && bio->bi_partno == b->bd_partno;
} }
/* /*

View File

@ -447,7 +447,7 @@ static int __submit_flush_wait(struct f2fs_sb_info *sbi,
int ret; int ret;
bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH; bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH;
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
ret = submit_bio_wait(bio); ret = submit_bio_wait(bio);
bio_put(bio); bio_put(bio);

View File

@ -265,7 +265,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES); bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9); bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
bio->bi_bdev = sb->s_bdev; bio_set_dev(bio, sb->s_bdev);
bio->bi_end_io = gfs2_end_log_write; bio->bi_end_io = gfs2_end_log_write;
bio->bi_private = sdp; bio->bi_private = sdp;

View File

@ -221,7 +221,7 @@ static void gfs2_submit_bhs(int op, int op_flags, struct buffer_head *bhs[],
bio = bio_alloc(GFP_NOIO, num); bio = bio_alloc(GFP_NOIO, num);
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev; bio_set_dev(bio, bh->b_bdev);
while (num > 0) { while (num > 0) {
bh = *bhs; bh = *bhs;
if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) { if (!bio_add_page(bio, bh->b_page, bh->b_size, bh_offset(bh))) {

View File

@ -242,7 +242,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
bio = bio_alloc(GFP_NOFS, 1); bio = bio_alloc(GFP_NOFS, 1);
bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9); bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
bio->bi_bdev = sb->s_bdev; bio_set_dev(bio, sb->s_bdev);
bio_add_page(bio, page, PAGE_SIZE, 0); bio_add_page(bio, page, PAGE_SIZE, 0);
bio->bi_end_io = end_bio_io_page; bio->bi_end_io = end_bio_io_page;

View File

@ -65,7 +65,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc(GFP_NOIO, 1);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_bdev = sb->s_bdev; bio_set_dev(bio, sb->s_bdev);
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);
if (op != WRITE && data) if (op != WRITE && data)

View File

@ -805,7 +805,7 @@ iomap_dio_zero(struct iomap_dio *dio, struct iomap *iomap, loff_t pos,
struct bio *bio; struct bio *bio;
bio = bio_alloc(GFP_KERNEL, 1); bio = bio_alloc(GFP_KERNEL, 1);
bio->bi_bdev = iomap->bdev; bio_set_dev(bio, iomap->bdev);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
iomap->blkno + ((pos - iomap->offset) >> 9); iomap->blkno + ((pos - iomap->offset) >> 9);
bio->bi_private = dio; bio->bi_private = dio;
@ -884,7 +884,7 @@ iomap_dio_actor(struct inode *inode, loff_t pos, loff_t length,
return 0; return 0;
bio = bio_alloc(GFP_KERNEL, nr_pages); bio = bio_alloc(GFP_KERNEL, nr_pages);
bio->bi_bdev = iomap->bdev; bio_set_dev(bio, iomap->bdev);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
iomap->blkno + ((pos - iomap->offset) >> 9); iomap->blkno + ((pos - iomap->offset) >> 9);
bio->bi_write_hint = dio->iocb->ki_hint; bio->bi_write_hint = dio->iocb->ki_hint;

View File

@ -1995,7 +1995,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bio = bio_alloc(GFP_NOFS, 1); bio = bio_alloc(GFP_NOFS, 1);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio->bi_bdev = log->bdev; bio_set_dev(bio, log->bdev);
bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);
@ -2139,7 +2139,7 @@ static void lbmStartIO(struct lbuf * bp)
bio = bio_alloc(GFP_NOFS, 1); bio = bio_alloc(GFP_NOFS, 1);
bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9); bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio->bi_bdev = log->bdev; bio_set_dev(bio, log->bdev);
bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset); bio_add_page(bio, bp->l_page, LOGPSIZE, bp->l_offset);
BUG_ON(bio->bi_iter.bi_size != LOGPSIZE); BUG_ON(bio->bi_iter.bi_size != LOGPSIZE);

View File

@ -430,7 +430,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage); len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
bio = bio_alloc(GFP_NOFS, 1); bio = bio_alloc(GFP_NOFS, 1);
bio->bi_bdev = inode->i_sb->s_bdev; bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9); bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io; bio->bi_end_io = metapage_write_end_io;
bio->bi_private = page; bio->bi_private = page;
@ -510,7 +510,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
submit_bio(bio); submit_bio(bio);
bio = bio_alloc(GFP_NOFS, 1); bio = bio_alloc(GFP_NOFS, 1);
bio->bi_bdev = inode->i_sb->s_bdev; bio_set_dev(bio, inode->i_sb->s_bdev);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
pblock << (inode->i_blkbits - 9); pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io; bio->bi_end_io = metapage_read_end_io;

View File

@ -83,7 +83,7 @@ mpage_alloc(struct block_device *bdev,
} }
if (bio) { if (bio) {
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector = first_sector; bio->bi_iter.bi_sector = first_sector;
} }
return bio; return bio;

View File

@ -130,7 +130,7 @@ bl_alloc_init_bio(int npg, struct block_device *bdev, sector_t disk_sector,
if (bio) { if (bio) {
bio->bi_iter.bi_sector = disk_sector; bio->bi_iter.bi_sector = disk_sector;
bio->bi_bdev = bdev; bio_set_dev(bio, bdev);
bio->bi_end_io = end_io; bio->bi_end_io = end_io;
bio->bi_private = par; bio->bi_private = par;
} }

View File

@ -400,7 +400,7 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
bio = bio_alloc(GFP_NOIO, nr_vecs); bio = bio_alloc(GFP_NOIO, nr_vecs);
} }
if (likely(bio)) { if (likely(bio)) {
bio->bi_bdev = nilfs->ns_bdev; bio_set_dev(bio, nilfs->ns_bdev);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
start << (nilfs->ns_blocksize_bits - 9); start << (nilfs->ns_blocksize_bits - 9);
} }

View File

@ -554,7 +554,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
/* Must put everything in 512 byte sectors for the bio... */ /* Must put everything in 512 byte sectors for the bio... */
bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9); bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
bio->bi_bdev = reg->hr_bdev; bio_set_dev(bio, reg->hr_bdev);
bio->bi_private = wc; bio->bi_private = wc;
bio->bi_end_io = o2hb_bio_end_io; bio->bi_end_io = o2hb_bio_end_io;
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);

View File

@ -517,7 +517,7 @@ xfs_init_bio_from_bh(
struct buffer_head *bh) struct buffer_head *bh)
{ {
bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev; bio_set_dev(bio, bh->b_bdev);
} }
static struct xfs_ioend * static struct xfs_ioend *

View File

@ -1281,7 +1281,7 @@ xfs_buf_ioapply_map(
nr_pages = min(total_nr_pages, BIO_MAX_PAGES); nr_pages = min(total_nr_pages, BIO_MAX_PAGES);
bio = bio_alloc(GFP_NOIO, nr_pages); bio = bio_alloc(GFP_NOIO, nr_pages);
bio->bi_bdev = bp->b_target->bt_bdev; bio_set_dev(bio, bp->b_target->bt_bdev);
bio->bi_iter.bi_sector = sector; bio->bi_iter.bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io; bio->bi_end_io = xfs_buf_bio_end_io;
bio->bi_private = bp; bio->bi_private = bp;

View File

@ -494,6 +494,24 @@ extern struct bio_vec *bvec_alloc(gfp_t, int, unsigned long *, mempool_t *);
extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int); extern void bvec_free(mempool_t *, struct bio_vec *, unsigned int);
extern unsigned int bvec_nr_vecs(unsigned short idx); extern unsigned int bvec_nr_vecs(unsigned short idx);
#define bio_set_dev(bio, bdev) \
do { \
(bio)->bi_disk = (bdev)->bd_disk; \
(bio)->bi_partno = (bdev)->bd_partno; \
} while (0)
#define bio_copy_dev(dst, src) \
do { \
(dst)->bi_disk = (src)->bi_disk; \
(dst)->bi_partno = (src)->bi_partno; \
} while (0)
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
#define bio_devname(bio, buf) \
__bdevname(bio_dev(bio), (buf))
#ifdef CONFIG_BLK_CGROUP #ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css); int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
int bio_associate_current(struct bio *bio); int bio_associate_current(struct bio *bio);

View File

@ -48,7 +48,8 @@ struct blk_issue_stat {
*/ */
struct bio { struct bio {
struct bio *bi_next; /* request queue link */ struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev; struct gendisk *bi_disk;
u8 bi_partno;
blk_status_t bi_status; blk_status_t bi_status;
unsigned int bi_opf; /* bottom bits req flags, unsigned int bi_opf; /* bottom bits req flags,
* top bits REQ_OP. Use * top bits REQ_OP. Use

View File

@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(bcache_request,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev; __entry->dev = bio_dev(bio);
__entry->orig_major = d->disk->major; __entry->orig_major = d->disk->major;
__entry->orig_minor = d->disk->first_minor; __entry->orig_minor = d->disk->first_minor;
__entry->sector = bio->bi_iter.bi_sector; __entry->sector = bio->bi_iter.bi_sector;
@ -98,7 +98,7 @@ DECLARE_EVENT_CLASS(bcache_bio,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev; __entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector; __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9; __entry->nr_sector = bio->bi_iter.bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@ -133,7 +133,7 @@ TRACE_EVENT(bcache_read,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev; __entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector; __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio->bi_iter.bi_size >> 9; __entry->nr_sector = bio->bi_iter.bi_size >> 9;
blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);

View File

@ -236,8 +236,7 @@ TRACE_EVENT(block_bio_bounce,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio->bi_bdev ? __entry->dev = bio_dev(bio);
bio->bi_bdev->bd_dev : 0;
__entry->sector = bio->bi_iter.bi_sector; __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio); __entry->nr_sector = bio_sectors(bio);
blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@ -274,7 +273,7 @@ TRACE_EVENT(block_bio_complete,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev; __entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector; __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio); __entry->nr_sector = bio_sectors(bio);
__entry->error = error; __entry->error = error;
@ -302,7 +301,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev; __entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector; __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio); __entry->nr_sector = bio_sectors(bio);
blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@ -369,7 +368,7 @@ TRACE_EVENT(block_bio_queue,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev; __entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector; __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio); __entry->nr_sector = bio_sectors(bio);
blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@ -397,7 +396,8 @@ DECLARE_EVENT_CLASS(block_get_rq,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio ? bio->bi_bdev->bd_dev : 0; __entry->dev = bio ? bio_dev(bio) : 0;
__entry->dev = bio_dev(bio);
__entry->sector = bio ? bio->bi_iter.bi_sector : 0; __entry->sector = bio ? bio->bi_iter.bi_sector : 0;
__entry->nr_sector = bio ? bio_sectors(bio) : 0; __entry->nr_sector = bio ? bio_sectors(bio) : 0;
blk_fill_rwbs(__entry->rwbs, blk_fill_rwbs(__entry->rwbs,
@ -532,7 +532,7 @@ TRACE_EVENT(block_split,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev; __entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector; __entry->sector = bio->bi_iter.bi_sector;
__entry->new_sector = new_sector; __entry->new_sector = new_sector;
blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size); blk_fill_rwbs(__entry->rwbs, bio->bi_opf, bio->bi_iter.bi_size);
@ -573,7 +573,7 @@ TRACE_EVENT(block_bio_remap,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = bio->bi_bdev->bd_dev; __entry->dev = bio_dev(bio);
__entry->sector = bio->bi_iter.bi_sector; __entry->sector = bio->bi_iter.bi_sector;
__entry->nr_sector = bio_sectors(bio); __entry->nr_sector = bio_sectors(bio);
__entry->old_dev = dev; __entry->old_dev = dev;

View File

@ -829,7 +829,7 @@ DECLARE_EVENT_CLASS(f2fs__bio,
TP_fast_assign( TP_fast_assign(
__entry->dev = sb->s_dev; __entry->dev = sb->s_dev;
__entry->target = bio->bi_bdev->bd_dev; __entry->target = bio_dev(bio);
__entry->op = bio_op(bio); __entry->op = bio_op(bio);
__entry->op_flags = bio->bi_opf; __entry->op_flags = bio->bi_opf;
__entry->type = type; __entry->type = type;

View File

@ -242,8 +242,7 @@ static void hib_end_io(struct bio *bio)
if (bio->bi_status) { if (bio->bi_status) {
printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n", printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
imajor(bio->bi_bdev->bd_inode), MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
iminor(bio->bi_bdev->bd_inode),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
} }
@ -270,7 +269,7 @@ static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr,
bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1); bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1);
bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9);
bio->bi_bdev = hib_resume_bdev; bio_set_dev(bio, hib_resume_bdev);
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);
if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {

View File

@ -963,7 +963,7 @@ static void blk_add_trace_bio_remap(void *ignore,
return; return;
r.device_from = cpu_to_be32(dev); r.device_from = cpu_to_be32(dev);
r.device_to = cpu_to_be32(bio->bi_bdev->bd_dev); r.device_to = cpu_to_be32(bio_dev(bio));
r.sector_from = cpu_to_be64(from); r.sector_from = cpu_to_be64(from);
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size, __blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,

View File

@ -31,7 +31,10 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
bio = bio_alloc(gfp_flags, 1); bio = bio_alloc(gfp_flags, 1);
if (bio) { if (bio) {
bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev); struct block_device *bdev;
bio->bi_iter.bi_sector = map_swap_page(page, &bdev);
bio_set_dev(bio, bdev);
bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9; bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
bio->bi_end_io = end_io; bio->bi_end_io = end_io;
@ -57,8 +60,7 @@ void end_swap_bio_write(struct bio *bio)
*/ */
set_page_dirty(page); set_page_dirty(page);
pr_alert("Write-error on swap-device (%u:%u:%llu)\n", pr_alert("Write-error on swap-device (%u:%u:%llu)\n",
imajor(bio->bi_bdev->bd_inode), MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
iminor(bio->bi_bdev->bd_inode),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
ClearPageReclaim(page); ClearPageReclaim(page);
} }
@ -123,8 +125,7 @@ static void end_swap_bio_read(struct bio *bio)
SetPageError(page); SetPageError(page);
ClearPageUptodate(page); ClearPageUptodate(page);
pr_alert("Read-error on swap-device (%u:%u:%llu)\n", pr_alert("Read-error on swap-device (%u:%u:%llu)\n",
imajor(bio->bi_bdev->bd_inode), MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
iminor(bio->bi_bdev->bd_inode),
(unsigned long long)bio->bi_iter.bi_sector); (unsigned long long)bio->bi_iter.bi_sector);
goto out; goto out;
} }
@ -338,7 +339,7 @@ int swap_readpage(struct page *page, bool do_poll)
int ret = 0; int ret = 0;
struct swap_info_struct *sis = page_swap_info(page); struct swap_info_struct *sis = page_swap_info(page);
blk_qc_t qc; blk_qc_t qc;
struct block_device *bdev; struct gendisk *disk;
VM_BUG_ON_PAGE(!PageSwapCache(page), page); VM_BUG_ON_PAGE(!PageSwapCache(page), page);
VM_BUG_ON_PAGE(!PageLocked(page), page); VM_BUG_ON_PAGE(!PageLocked(page), page);
@ -377,7 +378,7 @@ int swap_readpage(struct page *page, bool do_poll)
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
} }
bdev = bio->bi_bdev; disk = bio->bi_disk;
bio->bi_private = current; bio->bi_private = current;
bio_set_op_attrs(bio, REQ_OP_READ, 0); bio_set_op_attrs(bio, REQ_OP_READ, 0);
count_vm_event(PSWPIN); count_vm_event(PSWPIN);
@ -388,7 +389,7 @@ int swap_readpage(struct page *page, bool do_poll)
if (!READ_ONCE(bio->bi_private)) if (!READ_ONCE(bio->bi_private))
break; break;
if (!blk_mq_poll(bdev_get_queue(bdev), qc)) if (!blk_mq_poll(disk->queue, qc))
break; break;
} }
__set_current_state(TASK_RUNNING); __set_current_state(TASK_RUNNING);