blk-merge: compute bio->bi_seg_front_size efficiently

It is enough to check and compute bio->bi_seg_front_size just
after the 1st segment is found, but the current code checks it
for every bvec, which is inefficient.

This patch follows the approach used in __blk_recalc_rq_segments()
for computing bio->bi_seg_front_size; it is more efficient, and the
code becomes more readable too.

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Ming Lei, 2017-12-18 20:22:14 +08:00, committed by Jens Axboe
parent 92681eca61
commit 6a501bf080
1 changed file with 5 additions and 4 deletions

@@ -146,22 +146,21 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			bvprvp = &bvprv;
 			sectors += bv.bv_len >> 9;
 
-			if (nsegs == 1 && seg_size > front_seg_size)
-				front_seg_size = seg_size;
 			continue;
 		}
 new_segment:
 		if (nsegs == queue_max_segments(q))
 			goto split;
 
+		if (nsegs == 1 && seg_size > front_seg_size)
+			front_seg_size = seg_size;
+
 		nsegs++;
 		bvprv = bv;
 		bvprvp = &bvprv;
 		seg_size = bv.bv_len;
 		sectors += bv.bv_len >> 9;
 
-		if (nsegs == 1 && seg_size > front_seg_size)
-			front_seg_size = seg_size;
 	}
 
 	do_split = false;
@@ -174,6 +173,8 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 			bio = new;
 	}
 
+	if (nsegs == 1 && seg_size > front_seg_size)
+		front_seg_size = seg_size;
 	bio->bi_seg_front_size = front_seg_size;
 	if (seg_size > bio->bi_seg_back_size)
 		bio->bi_seg_back_size = seg_size;