for-linus-20190323
-----BEGIN PGP SIGNATURE-----

iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAlyWVjYQHGF4Ym9lQGtl
cm5lbC5kawAKCRD301j7KXHgpghzEADViI9AC1ixO/4/L09T3LD6WrKa+GfAfl95
JhbBB5CzaSYtw1lW2sciXO3vD6Smgbs9Mir3SvKxj5QjbMjKUhlLGf2jwfsiokY2
PU/rZkd5ueioQ/jm6ABgos1hXOVz6EjDO2B8ERBPTJ19vLWVtpTAh9yAGYWFnY8m
etiNAFpcM3iVeeaLFlCEbWk4O2A3oPvwm6rz41h81mbKTmFc6BBNSSb/FwtsRfP4
vLK+JOBBy1jEwpjULoi9FmNVa+QCjfvAhK3kwc482+AoF5HHFeo5LnLOMjcaPTSy
W3Nefh+Jhzm0ERSX+q4biJ1ly/doFpmFfdmzXbFaWkLBQENx2MZkb1MS8SSxRV1N
hxRACtY8DYAlDLDJ6SsLGgJ0js6282hGPPR5DVxp4VP1iWvobUx/QcoR14lVvpFt
1g/jFDuU18JW5lxY/gIYT6PGjZRpdqdaqhhI6XmmMj3V0zo6Z4UrX1FxaoNn9fqP
2JQKUpSvlq0ZFUwHOn91sRbv9Zb1mKgWRsjTUPnFL8dAiHOgl7ZS8qvgBRkKsl4D
54aG2pvBSVOa+8S+UAFfYJkZPiv8eHg8WqeIy3e8J+AROtuP2ccitt36cvltl/MS
yivVzUenyv+SZSlTMuYbQfQRmm5CTit0NSDrclrB/qc1w7EbmjxIF64ohXdatuI5
xCKmvid0Wg==
=4Ktc
-----END PGP SIGNATURE-----

Merge tag 'for-linus-20190323' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A set of fixes/changes that should go into this series. This contains:

  - Kernel doc / comment updates (Bart, Shenghui)

  - Un-export of core-only used function (Bart)

  - Fix race on loop file access (Dongli)

  - pf/pcd queue cleanup fixes (me)

  - Use appropriate helper for RESTART bit set (Yufen)

  - Use named identifier for classic poll (Yufen)"

* tag 'for-linus-20190323' of git://git.kernel.dk/linux-block:
  sbitmap: trivial - update comment for sbitmap_deferred_clear_bit
  blkcg: Fix kernel-doc warnings
  blk-iolatency: #include "blk.h"
  block: Unexport blk_mq_add_to_requeue_list()
  block: add BLK_MQ_POLL_CLASSIC for hybrid poll and return EINVAL for unexpected value
  blk-mq: remove unused 'nr_expired' from blk_mq_hw_ctx
  loop: access lo_backing_file only when the loop device is Lo_bound
  blk-mq: use blk_mq_sched_mark_restart_hctx to set RESTART
  paride/pcd: cleanup queues when detection fails
  paride/pf: cleanup queues when detection fails
commit 2335cbe648
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -1736,8 +1736,8 @@ void blkcg_maybe_throttle_current(void)
 
 /**
  * blkcg_schedule_throttle - this task needs to check for throttling
- * @q - the request queue IO was submitted on
- * @use_memdelay - do we charge this to memory delay for PSI
+ * @q: the request queue IO was submitted on
+ * @use_memdelay: do we charge this to memory delay for PSI
  *
  * This is called by the IO controller when we know there's delay accumulated
  * for the blkg for this task. We do not pass the blkg because there are places
@@ -1769,8 +1769,9 @@ void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay)
 
 /**
  * blkcg_add_delay - add delay to this blkg
- * @now - the current time in nanoseconds
- * @delta - how many nanoseconds of delay to add
+ * @blkg: blkg of interest
+ * @now: the current time in nanoseconds
+ * @delta: how many nanoseconds of delay to add
  *
  * Charge @delta to the blkg's current delay accumulation. This is used to
  * throttle tasks if an IO controller thinks we need more throttling.
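For reference, these warnings come from scripts/kernel-doc, which only
recognizes parameter lines of the form "@name: description"; with a dash in
place of the colon it reports the parameter as not described. The shape both
hunks converge on, as a minimal sketch (the function name here is made up):

	/**
	 * foo_add_delay - add delay to this foo group
	 * @grp: group of interest
	 * @delta: how many nanoseconds of delay to add
	 *
	 * Free-form description, separated from the parameters by a bare "*" line.
	 */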
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -75,6 +75,7 @@
 #include <linux/blk-mq.h>
 #include "blk-rq-qos.h"
 #include "blk-stat.h"
+#include "blk.h"
 
 #define DEFAULT_SCALE_COOKIE 1000000U
 
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -782,7 +782,6 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
 	if (kick_requeue_list)
 		blk_mq_kick_requeue_list(q);
 }
-EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
 
 void blk_mq_kick_requeue_list(struct request_queue *q)
 {
@@ -1093,8 +1092,7 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx *hctx,
 	bool ret;
 
 	if (!(hctx->flags & BLK_MQ_F_TAG_SHARED)) {
-		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
-			set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
+		blk_mq_sched_mark_restart_hctx(hctx);
 
 		/*
 		 * It's possible that a tag was freed in the window between the
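For context, blk_mq_sched_mark_restart_hctx() in block/blk-mq-sched.c wraps
exactly the test-and-set being replaced, so this is a straight substitution
rather than a behavior change. Quoted from memory of the 5.0-era source, so
treat it as a sketch:

	void blk_mq_sched_mark_restart_hctx(struct blk_mq_hw_ctx *hctx)
	{
		if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state))
			set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
	}
	EXPORT_SYMBOL_GPL(blk_mq_sched_mark_restart_hctx);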
@@ -2857,7 +2855,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 	/*
 	 * Default to classic polling
 	 */
-	q->poll_nsec = -1;
+	q->poll_nsec = BLK_MQ_POLL_CLASSIC;
 
 	blk_mq_init_cpu_queues(q, set->nr_hw_queues);
 	blk_mq_add_queue_tag_set(set, q);
@@ -3392,7 +3390,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
 {
 	struct request *rq;
 
-	if (q->poll_nsec == -1)
+	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
 		return false;
 
 	if (!blk_qc_t_is_internal(cookie))
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -41,6 +41,8 @@ void blk_mq_free_queue(struct request_queue *q);
 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
 void blk_mq_wake_waiters(struct request_queue *q);
 bool blk_mq_dispatch_rq_list(struct request_queue *, struct list_head *, bool);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
+				bool kick_requeue_list);
 void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
 bool blk_mq_get_driver_tag(struct request *rq);
 struct request *blk_mq_dequeue_from_ctx(struct blk_mq_hw_ctx *hctx,
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -360,8 +360,8 @@ static ssize_t queue_poll_delay_show(struct request_queue *q, char *page)
 {
 	int val;
 
-	if (q->poll_nsec == -1)
-		val = -1;
+	if (q->poll_nsec == BLK_MQ_POLL_CLASSIC)
+		val = BLK_MQ_POLL_CLASSIC;
 	else
 		val = q->poll_nsec / 1000;
 
@@ -380,10 +380,12 @@ static ssize_t queue_poll_delay_store(struct request_queue *q, const char *page,
 	if (err < 0)
 		return err;
 
-	if (val == -1)
-		q->poll_nsec = -1;
-	else
+	if (val == BLK_MQ_POLL_CLASSIC)
+		q->poll_nsec = BLK_MQ_POLL_CLASSIC;
+	else if (val >= 0)
 		q->poll_nsec = val * 1000;
+	else
+		return -EINVAL;
 
 	return count;
 }
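Putting the hunk in context: this is the store side of the queue's
io_poll_delay sysfs attribute. Writing -1 selects classic polling, 0 selects
hybrid polling with an auto-estimated sleep time, and a positive value selects
hybrid polling with that many microseconds of sleep; any other value now fails
with -EINVAL instead of being silently scaled and stored. The whole handler
after this change reads roughly as follows (the unchanged lines are
reconstructed from memory of the 5.0-era source, so treat this as a sketch):

	static ssize_t queue_poll_delay_store(struct request_queue *q,
					      const char *page, size_t count)
	{
		int err, val;

		if (!q->mq_ops || !q->mq_ops->poll)
			return -EINVAL;

		err = kstrtoint(page, 10, &val);
		if (err < 0)
			return err;

		if (val == BLK_MQ_POLL_CLASSIC)
			q->poll_nsec = BLK_MQ_POLL_CLASSIC;
		else if (val >= 0)
			q->poll_nsec = val * 1000;
		else
			return -EINVAL;

		return count;
	}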
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -656,7 +656,7 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
 			return -EBADF;
 
 		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
-		if (l->lo_state == Lo_unbound) {
+		if (l->lo_state != Lo_bound) {
 			return -EINVAL;
 		}
 		f = l->lo_backing_file;
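The reason "!= Lo_bound" is the right test: lo_state is a three-valued enum,
and the old check still admitted a device in the transient teardown state,
during which __loop_clr_fd() clears lo_backing_file. For reference, from
drivers/block/loop.h (unchanged by this series):

	enum {
		Lo_unbound,
		Lo_bound,
		Lo_rundown,
	};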
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -749,8 +749,12 @@ static int pcd_detect(void)
 		return 0;
 
 	printk("%s: No CD-ROM drive found\n", name);
-	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++)
+	for (unit = 0, cd = pcd; unit < PCD_UNITS; unit++, cd++) {
+		blk_cleanup_queue(cd->disk->queue);
+		cd->disk->queue = NULL;
+		blk_mq_free_tag_set(&cd->tag_set);
 		put_disk(cd->disk);
+	}
 	pi_unregister_driver(par_drv);
 	return -1;
 }
--- a/drivers/block/paride/pf.c
+++ b/drivers/block/paride/pf.c
@@ -761,8 +761,12 @@ static int pf_detect(void)
 		return 0;
 
 	printk("%s: No ATAPI disk detected\n", name);
-	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++)
+	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
+		blk_cleanup_queue(pf->disk->queue);
+		pf->disk->queue = NULL;
+		blk_mq_free_tag_set(&pf->tag_set);
 		put_disk(pf->disk);
+	}
 	pi_unregister_driver(par_drv);
 	return -1;
 }
@@ -1047,13 +1051,15 @@ static void __exit pf_exit(void)
 	int unit;
 	unregister_blkdev(major, name);
 	for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) {
-		if (!pf->present)
-			continue;
-		del_gendisk(pf->disk);
+		if (pf->present)
+			del_gendisk(pf->disk);
+
 		blk_cleanup_queue(pf->disk->queue);
 		blk_mq_free_tag_set(&pf->tag_set);
 		put_disk(pf->disk);
-		pi_release(pf->pi);
+
+		if (pf->present)
+			pi_release(pf->pi);
 	}
 }
 
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -57,7 +57,6 @@ struct blk_mq_hw_ctx {
 	unsigned int		queue_num;
 
 	atomic_t		nr_active;
-	unsigned int		nr_expired;
 
 	struct hlist_node	cpuhp_dead;
 	struct kobject		kobj;
@@ -300,8 +299,6 @@ void blk_mq_end_request(struct request *rq, blk_status_t error);
 void __blk_mq_end_request(struct request *rq, blk_status_t error);
 
 void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list);
-void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
-				bool kick_requeue_list);
 void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
 bool blk_mq_complete_request(struct request *rq);
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -50,6 +50,9 @@ struct blk_stat_callback;
 /* Must be consistent with blk_mq_poll_stats_bkt() */
 #define BLK_MQ_POLL_STATS_BKTS 16
 
+/* Doing classic polling */
+#define BLK_MQ_POLL_CLASSIC -1
+
 /*
  * Maximum number of blkcg policies allowed to be registered concurrently.
  * Defined here to simplify include dependency.
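With the named constant in place, q->poll_nsec encodes three cases. A
hypothetical helper, not part of this series, just to spell out the
convention:

	/*
	 * BLK_MQ_POLL_CLASSIC (-1): classic busy-wait polling
	 *                        0: hybrid polling, sleep time auto-estimated
	 *                       >0: hybrid polling, fixed sleep time in ns
	 */
	static inline bool queue_wants_hybrid_poll(struct request_queue *q)
	{
		return q->poll_nsec != BLK_MQ_POLL_CLASSIC;
	}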
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -330,7 +330,7 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr)
 /*
  * This one is special, since it doesn't actually clear the bit, rather it
  * sets the corresponding bit in the ->cleared mask instead. Paired with
- * the caller doing sbitmap_batch_clear() if a given index is full, which
+ * the caller doing sbitmap_deferred_clear() if a given index is full, which
  * will clear the previously freed entries in the corresponding ->word.
  */
 static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
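And the helper this comment documents, quoted from memory of the same header
for reference:

	static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr)
	{
		unsigned long *addr = &sb->map[SB_NR_TO_INDEX(sb, bitnr)].cleared;

		set_bit(SB_NR_TO_BIT(sb, bitnr), addr);
	}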