Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
 "Make the block layer great again. Basically three amazing fixes in
  this pull request, split into 4 patches. Believe me, they should go
  into 4.4. Two of them fix a regression, the third and last fixes an
  easy-to-trigger bug.

   - Fix a bad irq enable through null_blk, for queue_mode=1 and using
     timer completions. Add a block helper to restart a queue
     asynchronously, and use that from null_blk. From me.

   - Fix a performance issue in NVMe. Some devices (Intel Pxxxx) expose
     a stripe boundary, and performance suffers if we cross it. We took
     that into account for merging, but not for the newer splitting
     code. Fix from Keith.

   - Fix a kernel oops in lightnvm with multiple channels. From Matias"

* 'for-linus' of git://git.kernel.dk/linux-block:
  lightnvm: wrong offset in bad blk lun calculation
  null_blk: use async queue restart helper
  block: add blk_start_queue_async()
  block: Split bios on chunk boundaries
commit c6169202e4
block/blk-core.c
@@ -206,6 +206,22 @@ void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
+/**
+ * blk_start_queue_async - asynchronously restart a previously stopped queue
+ * @q: The &struct request_queue in question
+ *
+ * Description:
+ *    blk_start_queue_async() will clear the stop flag on the queue, and
+ *    ensure that the request_fn for the queue is run from an async
+ *    context.
+ **/
+void blk_start_queue_async(struct request_queue *q)
+{
+	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
+	blk_run_queue_async(q);
+}
+EXPORT_SYMBOL(blk_start_queue_async);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
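For context, the new helper pairs the flag clear with an asynchronous run because the async run path declines to kick a queue that is still marked stopped (or dead): the QUEUE_FLAG_STOPPED bit has to be cleared first, and the request_fn is then invoked later from the kblockd workqueue rather than in the caller's context. A rough paraphrase of that helper, for illustration only, not the verbatim source:

/* Illustrative paraphrase of blk_run_queue_async(); see block/blk-core.c. */
void blk_run_queue_async(struct request_queue *q)
{
	/* A stopped or dead queue must not be run... */
	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
		/* ...otherwise punt the ->request_fn invocation to kblockd. */
		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
}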
block/blk-merge.c
@@ -81,7 +81,7 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
 	struct bio *new = NULL;
 
 	bio_for_each_segment(bv, bio, iter) {
-		if (sectors + (bv.bv_len >> 9) > queue_max_sectors(q))
+		if (sectors + (bv.bv_len >> 9) > blk_max_size_offset(q, bio->bi_iter.bi_sector))
 			goto split;
 
 		/*
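The change above caps each split at the device's chunk (stripe) boundary instead of only at the queue's max_sectors: blk_max_size_offset() returns how many sectors remain before the next boundary for the bio's starting sector, or plain max_sectors when no chunk limit is set. A minimal standalone sketch of that boundary math, using made-up numbers (a hypothetical 128K stripe, i.e. 256 sectors) and illustrative names rather than the kernel helper itself:

#include <stdio.h>

/*
 * With a power-of-two chunk size, the largest I/O allowed at a given
 * offset is the distance to the next chunk boundary; without a chunk
 * limit, only the plain max_sectors cap applies.
 */
static unsigned int max_sectors_at(unsigned int chunk_sectors,
				   unsigned int max_sectors,
				   unsigned long long offset)
{
	if (!chunk_sectors)
		return max_sectors;
	return chunk_sectors - (unsigned int)(offset & (chunk_sectors - 1));
}

int main(void)
{
	/* 8 sectors shy of a 256-sector boundary: only 8 sectors fit. */
	printf("%u\n", max_sectors_at(256, 2560, 248));	/* prints 8 */
	/* Exactly on a boundary: a full 256-sector chunk fits. */
	printf("%u\n", max_sectors_at(256, 2560, 256));	/* prints 256 */
	return 0;
}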
drivers/block/null_blk.c
@@ -232,20 +232,19 @@ static void end_cmd(struct nullb_cmd *cmd)
 		break;
 	case NULL_Q_BIO:
 		bio_endio(cmd->bio);
-		break;
+		goto free_cmd;
 	}
 
-	free_cmd(cmd);
-
 	/* Restart queue if needed, as we are freeing a tag */
-	if (q && !q->mq_ops && blk_queue_stopped(q)) {
+	if (queue_mode == NULL_Q_RQ && blk_queue_stopped(q)) {
 		unsigned long flags;
 
 		spin_lock_irqsave(q->queue_lock, flags);
-		if (blk_queue_stopped(q))
-			blk_start_queue(q);
+		blk_start_queue_async(q);
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
+free_cmd:
+	free_cmd(cmd);
 }
 
 static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
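The point of switching to the async variant: with queue_mode=1 and timer-based completions, end_cmd() runs from hrtimer (irq) context, and blk_start_queue() runs the queue's request_fn synchronously in the caller's context, which is how the bad irq enable described in the pull message came about. A hypothetical driver-side sketch of the pattern the fix adopts (example_complete_from_timer() is an illustrative name, not part of the patch):

#include <linux/blkdev.h>

/*
 * Hypothetical completion path running in timer/irq context: clear the
 * stop flag but let kblockd invoke the request_fn later, instead of
 * running it synchronously from this atomic context.
 */
static void example_complete_from_timer(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_queue_stopped(q))
		blk_start_queue_async(q);	/* defer ->request_fn to kblockd */
	spin_unlock_irqrestore(q->queue_lock, flags);
}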
drivers/lightnvm/gennvm.c
@@ -75,7 +75,7 @@ static int gennvm_block_bb(struct ppa_addr ppa, int nr_blocks, u8 *blks,
 	struct nvm_block *blk;
 	int i;
 
-	lun = &gn->luns[(dev->nr_luns * ppa.g.ch) + ppa.g.lun];
+	lun = &gn->luns[(dev->luns_per_chnl * ppa.g.ch) + ppa.g.lun];
 
 	for (i = 0; i < nr_blocks; i++) {
 		if (blks[i] == 0)
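The lun array here is one flat, channel-major array covering the whole device, so the per-channel row stride must be the number of luns per channel; using the device-wide lun count as the stride walks off the end of the array as soon as the channel index is non-zero, which is the multi-channel oops the pull message refers to. A toy calculation with made-up geometry (2 channels, 4 luns per channel) to show the difference:

#include <stdio.h>

int main(void)
{
	int nr_chnls = 2, luns_per_chnl = 4;
	int nr_luns = nr_chnls * luns_per_chnl;	/* 8 luns in total */
	int ch = 1, lun = 2;			/* third lun on the second channel */

	int wrong = nr_luns * ch + lun;		/* 10: past the 8-entry array */
	int right = luns_per_chnl * ch + lun;	/* 6: the intended entry */

	printf("wrong=%d right=%d (valid indices: 0..%d)\n",
	       wrong, right, nr_luns - 1);
	return 0;
}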
include/linux/blkdev.h
@@ -797,6 +797,7 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
 extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
+extern void blk_start_queue_async(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);