Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A collection of fixes from the last few weeks that should go into the
  current series.  This contains:

   - Various fixes for the per-blkcg policy data, fixing regressions
     since 4.1.  From Arianna and Tejun

   - Code cleanup for bcache closure macros from me.  Really just
     flushing this out, it's been sitting in another branch for months

   - FIELD_SIZEOF cleanup from Maninder Singh

   - bio integrity oops fix from Mike

   - Timeout regression fix for blk-mq from Ming Lei"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq: set default timeout as 30 seconds
  NVMe: Reread partitions on metadata formats
  bcache: don't embed 'return' statements in closure macros
  blkcg: fix blkcg_policy_data allocation bug
  blkcg: implement all_blkcgs list
  blkcg: blkcg_css_alloc() should grab blkcg_pol_mutex while iterating blkcg_policy[]
  blkcg: allow blkcg_pol_mutex to be grabbed from cgroup [file] methods
  block/blk-cgroup.c: free per-blkcg data when freeing the blkcg
  block: use FIELD_SIZEOF to calculate size of a field
  bio integrity: do not assume bio_integrity_pool exists if bioset exists
Committed by Linus Torvalds, 2015-07-16 16:38:08 -07:00, in commit 761ab7664b.
10 changed files with 113 additions and 79 deletions.

diff --git a/block/bio-integrity.c b/block/bio-integrity.c

@@ -51,7 +51,7 @@ struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
 	unsigned long idx = BIO_POOL_NONE;
 	unsigned inline_vecs;
 
-	if (!bs) {
+	if (!bs || !bs->bio_integrity_pool) {
 		bip = kmalloc(sizeof(struct bio_integrity_payload) +
 			      sizeof(struct bio_vec) * nr_vecs, gfp_mask);
 		inline_vecs = nr_vecs;
@@ -104,7 +104,7 @@ void bio_integrity_free(struct bio *bio)
 		kfree(page_address(bip->bip_vec->bv_page) +
 		      bip->bip_vec->bv_offset);
 
-	if (bs) {
+	if (bs && bs->bio_integrity_pool) {
 		if (bip->bip_slab != BIO_POOL_NONE)
 			bvec_free(bs->bvec_integrity_pool, bip->bip_vec,
 				  bip->bip_slab);
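
Context for the bio-integrity fix: an integrity pool is an optional add-on to a bioset (it only exists after a separate bioset_integrity_create() call), so a perfectly valid bioset can have a NULL bio_integrity_pool. A minimal sketch of the previously-oopsing case (hypothetical driver code, not from this commit):

	/* A bioset created without integrity support: bioset_integrity_create()
	 * is never called, so bs->bio_integrity_pool stays NULL and
	 * bio_integrity_alloc() must take the kmalloc() fallback path. */
	struct bio_set *bs = bioset_create(BIO_POOL_SIZE, 0);
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, 1, bs);
	struct bio_integrity_payload *bip;

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);	/* oopsed before the fix */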

diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c

@@ -29,6 +29,14 @@
 #define MAX_KEY_LEN 100
 
+/*
+ * blkcg_pol_mutex protects blkcg_policy[] and policy [de]activation.
+ * blkcg_pol_register_mutex nests outside of it and synchronizes entire
+ * policy [un]register operations including cgroup file additions /
+ * removals.  Putting cgroup file registration outside blkcg_pol_mutex
+ * allows grabbing it from cgroup callbacks.
+ */
+static DEFINE_MUTEX(blkcg_pol_register_mutex);
 static DEFINE_MUTEX(blkcg_pol_mutex);
 
 struct blkcg blkcg_root;
@@ -38,6 +46,8 @@ struct cgroup_subsys_state * const blkcg_root_css = &blkcg_root.css;
 static struct blkcg_policy *blkcg_policy[BLKCG_MAX_POLS];
 
+static LIST_HEAD(all_blkcgs);		/* protected by blkcg_pol_mutex */
+
 static bool blkcg_policy_enabled(struct request_queue *q,
 				 const struct blkcg_policy *pol)
 {
@@ -453,20 +463,7 @@ static int blkcg_reset_stats(struct cgroup_subsys_state *css,
 	struct blkcg_gq *blkg;
 	int i;
 
-	/*
-	 * XXX: We invoke cgroup_add/rm_cftypes() under blkcg_pol_mutex
-	 * which ends up putting cgroup's internal cgroup_tree_mutex under
-	 * it; however, cgroup_tree_mutex is nested above cgroup file
-	 * active protection and grabbing blkcg_pol_mutex from a cgroup
-	 * file operation creates a possible circular dependency.  cgroup
-	 * internal locking is planned to go through further simplification
-	 * and this issue should go away soon.  For now, let's trylock
-	 * blkcg_pol_mutex and restart the write on failure.
-	 *
-	 * http://lkml.kernel.org/g/5363C04B.4010400@oracle.com
-	 */
-	if (!mutex_trylock(&blkcg_pol_mutex))
-		return restart_syscall();
+	mutex_lock(&blkcg_pol_mutex);
 	spin_lock_irq(&blkcg->lock);
 
 	/*
@@ -822,8 +819,17 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
 {
 	struct blkcg *blkcg = css_to_blkcg(css);
 
-	if (blkcg != &blkcg_root)
-		kfree(blkcg);
+	mutex_lock(&blkcg_pol_mutex);
+	list_del(&blkcg->all_blkcgs_node);
+	mutex_unlock(&blkcg_pol_mutex);
+
+	if (blkcg != &blkcg_root) {
+		int i;
+
+		for (i = 0; i < BLKCG_MAX_POLS; i++)
+			kfree(blkcg->pd[i]);
+		kfree(blkcg);
+	}
 }
 
 static struct cgroup_subsys_state *
@@ -833,6 +839,8 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 	struct cgroup_subsys_state *ret;
 	int i;
 
+	mutex_lock(&blkcg_pol_mutex);
+
 	if (!parent_css) {
 		blkcg = &blkcg_root;
 		goto done;
@@ -875,14 +883,17 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
 #ifdef CONFIG_CGROUP_WRITEBACK
 	INIT_LIST_HEAD(&blkcg->cgwb_list);
 #endif
+	list_add_tail(&blkcg->all_blkcgs_node, &all_blkcgs);
+	mutex_unlock(&blkcg_pol_mutex);
 	return &blkcg->css;
 
 free_pd_blkcg:
 	for (i--; i >= 0; i--)
 		kfree(blkcg->pd[i]);
 free_blkcg:
 	kfree(blkcg);
+	mutex_unlock(&blkcg_pol_mutex);
 	return ret;
 }
@@ -1037,10 +1048,8 @@ int blkcg_activate_policy(struct request_queue *q,
 			  const struct blkcg_policy *pol)
 {
 	LIST_HEAD(pds);
-	LIST_HEAD(cpds);
 	struct blkcg_gq *blkg;
 	struct blkg_policy_data *pd, *nd;
-	struct blkcg_policy_data *cpd, *cnd;
 	int cnt = 0, ret;
 
 	if (blkcg_policy_enabled(q, pol))
@@ -1053,10 +1062,7 @@ int blkcg_activate_policy(struct request_queue *q,
 			cnt++;
 	spin_unlock_irq(q->queue_lock);
 
-	/*
-	 * Allocate per-blkg and per-blkcg policy data
-	 * for all existing blkgs.
-	 */
+	/* allocate per-blkg policy data for all existing blkgs */
 	while (cnt--) {
 		pd = kzalloc_node(pol->pd_size, GFP_KERNEL, q->node);
 		if (!pd) {
@@ -1064,15 +1070,6 @@ int blkcg_activate_policy(struct request_queue *q,
 			goto out_free;
 		}
 		list_add_tail(&pd->alloc_node, &pds);
-
-		if (!pol->cpd_size)
-			continue;
-		cpd = kzalloc_node(pol->cpd_size, GFP_KERNEL, q->node);
-		if (!cpd) {
-			ret = -ENOMEM;
-			goto out_free;
-		}
-		list_add_tail(&cpd->alloc_node, &cpds);
 	}
 
 	/*
@@ -1082,32 +1079,17 @@ int blkcg_activate_policy(struct request_queue *q,
 	spin_lock_irq(q->queue_lock);
 
 	list_for_each_entry(blkg, &q->blkg_list, q_node) {
-		if (WARN_ON(list_empty(&pds)) ||
-		    WARN_ON(pol->cpd_size && list_empty(&cpds))) {
+		if (WARN_ON(list_empty(&pds))) {
 			/* umm... this shouldn't happen, just abort */
 			ret = -ENOMEM;
 			goto out_unlock;
 		}
-		cpd = list_first_entry(&cpds, struct blkcg_policy_data,
-				       alloc_node);
-		list_del_init(&cpd->alloc_node);
 		pd = list_first_entry(&pds, struct blkg_policy_data, alloc_node);
 		list_del_init(&pd->alloc_node);
 
 		/* grab blkcg lock too while installing @pd on @blkg */
 		spin_lock(&blkg->blkcg->lock);
 
-		if (!pol->cpd_size)
-			goto no_cpd;
-		if (!blkg->blkcg->pd[pol->plid]) {
-			/* Per-policy per-blkcg data */
-			blkg->blkcg->pd[pol->plid] = cpd;
-			cpd->plid = pol->plid;
-			pol->cpd_init_fn(blkg->blkcg);
-		} else { /* must free it as it has already been extracted */
-			kfree(cpd);
-		}
-no_cpd:
 		blkg->pd[pol->plid] = pd;
 		pd->blkg = blkg;
 		pd->plid = pol->plid;
@@ -1124,8 +1106,6 @@ int blkcg_activate_policy(struct request_queue *q,
 	blk_queue_bypass_end(q);
 	list_for_each_entry_safe(pd, nd, &pds, alloc_node)
 		kfree(pd);
-	list_for_each_entry_safe(cpd, cnd, &cpds, alloc_node)
-		kfree(cpd);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(blkcg_activate_policy);
@@ -1162,8 +1142,6 @@ void blkcg_deactivate_policy(struct request_queue *q,
 		kfree(blkg->pd[pol->plid]);
 		blkg->pd[pol->plid] = NULL;
-		kfree(blkg->blkcg->pd[pol->plid]);
-		blkg->blkcg->pd[pol->plid] = NULL;
 
 		spin_unlock(&blkg->blkcg->lock);
 	}
@@ -1182,11 +1160,13 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  */
 int blkcg_policy_register(struct blkcg_policy *pol)
 {
+	struct blkcg *blkcg;
 	int i, ret;
 
 	if (WARN_ON(pol->pd_size < sizeof(struct blkg_policy_data)))
 		return -EINVAL;
 
+	mutex_lock(&blkcg_pol_register_mutex);
 	mutex_lock(&blkcg_pol_mutex);
 
 	/* find an empty slot */
@@ -1195,19 +1175,49 @@ int blkcg_policy_register(struct blkcg_policy *pol)
 		if (!blkcg_policy[i])
 			break;
 	if (i >= BLKCG_MAX_POLS)
-		goto out_unlock;
+		goto err_unlock;
 
-	/* register and update blkgs */
+	/* register @pol */
 	pol->plid = i;
-	blkcg_policy[i] = pol;
+	blkcg_policy[pol->plid] = pol;
+
+	/* allocate and install cpd's */
+	if (pol->cpd_size) {
+		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
+			struct blkcg_policy_data *cpd;
+
+			cpd = kzalloc(pol->cpd_size, GFP_KERNEL);
+			if (!cpd) {
+				mutex_unlock(&blkcg_pol_mutex);
+				goto err_free_cpds;
+			}
+
+			blkcg->pd[pol->plid] = cpd;
+			cpd->plid = pol->plid;
+			pol->cpd_init_fn(blkcg);
+		}
+	}
+
+	mutex_unlock(&blkcg_pol_mutex);
 
 	/* everything is in place, add intf files for the new policy */
 	if (pol->cftypes)
 		WARN_ON(cgroup_add_legacy_cftypes(&blkio_cgrp_subsys,
 						  pol->cftypes));
-	ret = 0;
-out_unlock:
-	mutex_unlock(&blkcg_pol_mutex);
-	return ret;
+	mutex_unlock(&blkcg_pol_register_mutex);
+	return 0;
+
+err_free_cpds:
+	if (pol->cpd_size) {
+		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
+			kfree(blkcg->pd[pol->plid]);
+			blkcg->pd[pol->plid] = NULL;
+		}
+	}
+	blkcg_policy[pol->plid] = NULL;
+err_unlock:
+	mutex_unlock(&blkcg_pol_mutex);
+	mutex_unlock(&blkcg_pol_register_mutex);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(blkcg_policy_register);
@@ -1220,7 +1230,9 @@ EXPORT_SYMBOL_GPL(blkcg_policy_register);
  */
 void blkcg_policy_unregister(struct blkcg_policy *pol)
 {
-	mutex_lock(&blkcg_pol_mutex);
+	struct blkcg *blkcg;
+
+	mutex_lock(&blkcg_pol_register_mutex);
 
 	if (WARN_ON(blkcg_policy[pol->plid] != pol))
 		goto out_unlock;
@@ -1229,9 +1241,19 @@ void blkcg_policy_unregister(struct blkcg_policy *pol)
 	if (pol->cftypes)
 		cgroup_rm_cftypes(pol->cftypes);
 
-	/* unregister and update blkgs */
+	/* remove cpds and unregister */
+	mutex_lock(&blkcg_pol_mutex);
+
+	if (pol->cpd_size) {
+		list_for_each_entry(blkcg, &all_blkcgs, all_blkcgs_node) {
+			kfree(blkcg->pd[pol->plid]);
+			blkcg->pd[pol->plid] = NULL;
+		}
+	}
 	blkcg_policy[pol->plid] = NULL;
+
+	mutex_unlock(&blkcg_pol_mutex);
 out_unlock:
-	mutex_unlock(&blkcg_pol_mutex);
+	mutex_unlock(&blkcg_pol_register_mutex);
 }
 EXPORT_SYMBOL_GPL(blkcg_policy_unregister);
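
Taken together, the blk-cgroup.c changes above establish the locking model described by the new comment at the top of the file; a condensed summary (paraphrase, not verbatim kernel text):

	/*
	 * Lock nesting after this merge:
	 *
	 *   blkcg_pol_register_mutex   - whole policy [un]register path,
	 *                                including cgroup file add/remove
	 *     blkcg_pol_mutex          - blkcg_policy[], all_blkcgs and
	 *                                the blkcg->pd[] pointers
	 *
	 * Since cftype registration now happens outside blkcg_pol_mutex,
	 * cgroup file methods such as blkcg_reset_stats() can take that
	 * mutex directly instead of the old mutex_trylock() +
	 * restart_syscall() workaround.
	 */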

diff --git a/block/blk-core.c b/block/blk-core.c

@@ -3370,7 +3370,7 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
 int __init blk_dev_init(void)
 {
 	BUILD_BUG_ON(__REQ_NR_BITS > 8 *
-			sizeof(((struct request *)0)->cmd_flags));
+			FIELD_SIZEOF(struct request, cmd_flags));
 
 	/* used for unplugging and affects IO latency/throughput - HIGHPRI */
 	kblockd_workqueue = alloc_workqueue("kblockd",
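
For reference, FIELD_SIZEOF() lives in include/linux/kernel.h and expands to the same null-pointer idiom that was open-coded here, so the BUILD_BUG_ON condition is unchanged:

	/* include/linux/kernel.h (definition as of this merge): */
	#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))

	/* FIELD_SIZEOF(struct request, cmd_flags)
	 *   == sizeof(((struct request *)0)->cmd_flags) */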

diff --git a/block/blk-mq.c b/block/blk-mq.c

@@ -1998,7 +1998,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
 		goto err_hctxs;
 
 	setup_timer(&q->timeout, blk_mq_rq_timer, (unsigned long) q);
-	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30000);
+	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);
 
 	q->nr_queues = nr_cpu_ids;
 	q->nr_hw_queues = set->nr_hw_queues;
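
The regression here is a unit mix-up: blk_queue_rq_timeout() takes jiffies, so the literal 30000 only means 30 seconds when HZ=1000. Checking the arithmetic:

	/* 30000 jiffies at HZ=1000 -> 30 s  (the intended default)
	 *              at HZ=250  -> 120 s
	 *              at HZ=100  -> 300 s
	 * 30 * HZ is 30 seconds for any HZ; msecs_to_jiffies(30000)
	 * would be an equivalent spelling. */
	blk_queue_rq_timeout(q, set->timeout ? set->timeout : 30 * HZ);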

diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c

@@ -2108,8 +2108,17 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 		goto out_free_disk;
 
 	add_disk(ns->disk);
-	if (ns->ms)
-		revalidate_disk(ns->disk);
+	if (ns->ms) {
+		struct block_device *bd = bdget_disk(ns->disk, 0);
+		if (!bd)
+			return;
+		if (blkdev_get(bd, FMODE_READ, NULL)) {
+			bdput(bd);
+			return;
+		}
+		blkdev_reread_part(bd);
+		blkdev_put(bd, FMODE_READ);
+	}
 	return;
 
 out_free_disk:
 	kfree(disk);
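
The NVMe fix replaces revalidate_disk() with an explicit partition-table re-read for metadata-formatted namespaces; blkdev_reread_part() is the in-kernel counterpart of the BLKRRPART ioctl. A rough userspace equivalent (a sketch, not part of this commit):

	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/fs.h>

	/* Re-read the partition table of e.g. "/dev/nvme0n1". */
	static int reread_partitions(const char *dev)
	{
		int fd = open(dev, O_RDONLY);
		int ret;

		if (fd < 0)
			return -1;
		ret = ioctl(fd, BLKRRPART);	/* same path as blkdev_reread_part() */
		close(fd);
		return ret;
	}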

diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h

@@ -320,7 +320,6 @@ static inline void closure_wake_up(struct closure_waitlist *list)
 do {								\
 	set_closure_fn(_cl, _fn, _wq);				\
 	closure_sub(_cl, CLOSURE_RUNNING + 1);			\
-	return;							\
 } while (0)
 
 /**
@@ -349,7 +348,6 @@
 do {								\
 	set_closure_fn(_cl, _fn, _wq);				\
 	closure_queue(_cl);					\
-	return;							\
 } while (0)
 
 /**
@@ -365,7 +363,6 @@
 do {								\
 	set_closure_fn(_cl, _destructor, NULL);			\
 	closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1);	\
-	return;							\
 } while (0)
 
 /**
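
With the hidden return gone from the closure macros, every caller must spell out its own control flow; the bcache changes below (io.c, journal.c, request.c) add exactly those explicit returns. The call-site pattern now looks like this (hypothetical function and workqueue names, for illustration only):

	static void example_step(struct closure *cl)
	{
		if (need_more_work) {
			continue_at(cl, example_step, example_wq);
			/* explicit now: continue_at() no longer returns for us */
			return;
		}
		closure_return(cl);	/* fine as the final statement */
	}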

diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c

@@ -105,6 +105,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 	} while (n != bio);
 
 	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
+	return;
 submit:
 	generic_make_request(bio);
 }

diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c

@@ -592,12 +592,14 @@ static void journal_write_unlocked(struct closure *cl)
 
 	if (!w->need_write) {
 		closure_return_with_destructor(cl, journal_write_unlock);
+		return;
 	} else if (journal_full(&c->journal)) {
 		journal_reclaim(c);
 		spin_unlock(&c->journal.lock);
 
 		btree_flush_write(c);
 		continue_at(cl, journal_write, system_wq);
+		return;
 	}
 
 	c->journal.blocks_free -= set_blocks(w->data, block_bytes(c));

diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c

@@ -88,8 +88,10 @@ static void bch_data_insert_keys(struct closure *cl)
 	if (journal_ref)
 		atomic_dec_bug(journal_ref);
 
-	if (!op->insert_data_done)
+	if (!op->insert_data_done) {
 		continue_at(cl, bch_data_insert_start, op->wq);
+		return;
+	}
 
 	bch_keylist_free(&op->insert_keys);
 	closure_return(cl);
@@ -216,8 +218,10 @@ static void bch_data_insert_start(struct closure *cl)
 		/* 1 for the device pointer and 1 for the chksum */
 		if (bch_keylist_realloc(&op->insert_keys,
 					3 + (op->csum ? 1 : 0),
-					op->c))
+					op->c)) {
 			continue_at(cl, bch_data_insert_keys, op->wq);
+			return;
+		}
 
 		k = op->insert_keys.top;
 		bkey_init(k);
@@ -255,6 +259,7 @@ static void bch_data_insert_start(struct closure *cl)
 
 	op->insert_data_done = true;
 	continue_at(cl, bch_data_insert_keys, op->wq);
+	return;
 err:
 	/* bch_alloc_sectors() blocks if s->writeback = true */
 	BUG_ON(op->writeback);
@@ -576,8 +581,10 @@ static void cache_lookup(struct closure *cl)
 	ret = bch_btree_map_keys(&s->op, s->iop.c,
 				 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 				 cache_lookup_fn, MAP_END_KEY);
-	if (ret == -EAGAIN)
+	if (ret == -EAGAIN) {
 		continue_at(cl, cache_lookup, bcache_wq);
+		return;
+	}
 
 	closure_return(cl);
 }
@@ -1085,6 +1092,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 		continue_at_nobarrier(&s->cl,
 				      flash_dev_nodata,
 				      bcache_wq);
+		return;
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
 					     &KEY(d->id, bio->bi_iter.bi_sector, 0),

diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h

@@ -47,6 +47,7 @@ struct blkcg {
 	struct blkcg_policy_data	*pd[BLKCG_MAX_POLS];
 
+	struct list_head		all_blkcgs_node;
 #ifdef CONFIG_CGROUP_WRITEBACK
 	struct list_head		cgwb_list;
 #endif
@@ -88,18 +89,12 @@ struct blkg_policy_data {
  * Policies that need to keep per-blkcg data which is independent
  * from any request_queue associated to it must specify its size
  * with the cpd_size field of the blkcg_policy structure and
- * embed a blkcg_policy_data in it. blkcg core allocates
- * policy-specific per-blkcg structures lazily the first time
- * they are actually needed, so it handles them together with
- * blkgs. cpd_init() is invoked to let each policy handle
- * per-blkcg data.
+ * embed a blkcg_policy_data in it.  cpd_init() is invoked to let
+ * each policy handle per-blkcg data.
  */
 struct blkcg_policy_data {
 	/* the policy id this per-policy data belongs to */
 	int				plid;
-
-	/* used during policy activation */
-	struct list_head		alloc_node;
 };
 
 /* association between a blk cgroup and a request queue */
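
To make the revised contract concrete, here is a minimal hypothetical policy using cpd_size/cpd_init (illustrative names throughout, assuming the const-qualified cpd_init signature used by CFQ at the time): with this merge the core allocates one such structure per existing blkcg at blkcg_policy_register() time and calls cpd_init_fn() on each, instead of allocating lazily during blkcg_activate_policy().

	/* Hypothetical example policy; not part of this commit. */
	struct example_blkcg_data {
		struct blkcg_policy_data cpd;	/* must be embedded */
		unsigned int weight;
	};

	static struct blkcg_policy example_policy;

	static void example_cpd_init(const struct blkcg *blkcg)
	{
		struct example_blkcg_data *d =
			container_of(blkcg->pd[example_policy.plid],
				     struct example_blkcg_data, cpd);

		d->weight = 100;	/* policy-specific default */
	}

	static struct blkcg_policy example_policy = {
		.pd_size	= sizeof(struct blkg_policy_data),
		.cpd_size	= sizeof(struct example_blkcg_data),
		.cpd_init_fn	= example_cpd_init,
	};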