mirror of https://gitee.com/openkylin/linux.git
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block layer fixes from Jens Axboe:
 "Just a set of small fixes that have either been queued up after the
  original pull for this merge window, or just missed the original pull
  request.

   - a few bcache fixes/changes from Eric and Kent
   - add WRITE_SAME to the command filter whitelist from Mauricio
   - kill an unused struct member from Ritesh
   - partition IO alignment fix from Stefan
   - nvme sysfs printf fix from Stephen"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: check partition alignment
  nvme : Use correct scnprintf in cmb show
  block: allow WRITE_SAME commands with the SG_IO ioctl
  block: Remove unused member (busy) from struct blk_queue_tag
  bcache: partition support: add 16 minors per bcacheN device
  bcache: Make gc wakeup sane, remove set_task_state()
commit af22941ae1
block/ioctl.c
@@ -45,6 +45,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
 		    || pstart < 0 || plength < 0 || partno > 65535)
 			return -EINVAL;
 	}
+	/* check if partition is aligned to blocksize */
+	if (p.start & (bdev_logical_block_size(bdev) - 1))
+		return -EINVAL;
 
 	mutex_lock(&bdev->bd_mutex);
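For readers unfamiliar with the idiom, the added check relies on logical block sizes being powers of two: masking with size - 1 isolates the remainder. A minimal userspace sketch of the same test (the values are hypothetical, not from the patch):

#include <stdio.h>

/* For a power-of-two size, "start & (size - 1)" is nonzero
 * exactly when start is not a multiple of size. */
static int is_aligned(unsigned long long start, unsigned int block_size)
{
	return (start & (block_size - 1ULL)) == 0;
}

int main(void)
{
	/* hypothetical 512-byte logical blocks */
	printf("%d\n", is_aligned(4096, 512));	/* 1: aligned */
	printf("%d\n", is_aligned(4097, 512));	/* 0: would now get -EINVAL */
	return 0;
}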
block/scsi_ioctl.c
@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
 	__set_bit(WRITE_16, filter->write_ok);
 	__set_bit(WRITE_LONG, filter->write_ok);
 	__set_bit(WRITE_LONG_2, filter->write_ok);
+	__set_bit(WRITE_SAME, filter->write_ok);
+	__set_bit(WRITE_SAME_16, filter->write_ok);
+	__set_bit(WRITE_SAME_32, filter->write_ok);
 	__set_bit(ERASE, filter->write_ok);
 	__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
 	__set_bit(MODE_SELECT, filter->write_ok);
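With the WRITE_SAME opcodes whitelisted, an unprivileged process with write access to the device node can issue them through SG_IO instead of being rejected with EPERM. A rough userspace sketch, assuming a WRITE SAME(16) CDB and the standard <scsi/sg.h> interface; the device, LBA, and block geometry are the caller's responsibility:

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* WRITE SAME(16), opcode 0x93: one logical block is replicated over
 * nblocks starting at lba. block/block_len is the source buffer. */
int write_same16(int fd, unsigned long long lba, unsigned int nblocks,
		 unsigned char *block, unsigned int block_len)
{
	unsigned char cdb[16] = { 0x93 };
	struct sg_io_hdr hdr;
	int i;

	for (i = 0; i < 8; i++)		/* big-endian LBA, bytes 2..9 */
		cdb[2 + i] = lba >> (8 * (7 - i));
	for (i = 0; i < 4; i++)		/* block count, bytes 10..13 */
		cdb[10 + i] = nblocks >> (8 * (3 - i));

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmd_len = sizeof(cdb);
	hdr.cmdp = cdb;
	hdr.dxfer_direction = SG_DXFER_TO_DEV;
	hdr.dxferp = block;
	hdr.dxfer_len = block_len;
	hdr.timeout = 60000;		/* ms */

	return ioctl(fd, SG_IO, &hdr);	/* failed with EPERM before this patch */
}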
drivers/md/bcache/bcache.h
@@ -425,7 +425,7 @@ struct cache {
 	 * until a gc finishes - otherwise we could pointlessly burn a ton of
 	 * cpu
 	 */
-	unsigned		invalidate_needs_gc:1;
+	unsigned		invalidate_needs_gc;
 
 	bool			discard; /* Get rid of? */
@@ -593,8 +593,8 @@ struct cache_set {
 
 	/* Counts how many sectors bio_insert has added to the cache */
 	atomic_t		sectors_to_gc;
+	wait_queue_head_t	gc_wait;
 
-	wait_queue_head_t	moving_gc_wait;
 	struct keybuf		moving_gc_keys;
 	/* Number of moving GC bios in flight */
 	struct semaphore	moving_in_flight;
drivers/md/bcache/btree.c
@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
 	bch_moving_gc(c);
 }
 
-static int bch_gc_thread(void *arg)
+static bool gc_should_run(struct cache_set *c)
 {
-	struct cache_set *c = arg;
 	struct cache *ca;
 	unsigned i;
 
-	while (1) {
-again:
-		bch_btree_gc(c);
+	for_each_cache(ca, c, i)
+		if (ca->invalidate_needs_gc)
+			return true;
 
-		set_current_state(TASK_INTERRUPTIBLE);
-		if (kthread_should_stop())
-			break;
+	if (atomic_read(&c->sectors_to_gc) < 0)
+		return true;
 
-		mutex_lock(&c->bucket_lock);
+	return false;
+}
 
-		for_each_cache(ca, c, i)
-			if (ca->invalidate_needs_gc) {
-				mutex_unlock(&c->bucket_lock);
-				set_current_state(TASK_RUNNING);
-				goto again;
-			}
+static int bch_gc_thread(void *arg)
+{
+	struct cache_set *c = arg;
 
-		mutex_unlock(&c->bucket_lock);
+	while (1) {
+		wait_event_interruptible(c->gc_wait,
+			   kthread_should_stop() || gc_should_run(c));
 
-		schedule();
+		if (kthread_should_stop())
+			break;
+
+		set_gc_sectors(c);
+		bch_btree_gc(c);
 	}
 
 	return 0;
@@ -1790,11 +1792,10 @@ static int bch_gc_thread(void *arg)
 
 int bch_gc_thread_start(struct cache_set *c)
 {
-	c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc");
+	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
 	if (IS_ERR(c->gc_thread))
 		return PTR_ERR(c->gc_thread);
 
-	set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
 	return 0;
 }
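The rework replaces the hand-rolled set_current_state()/schedule() loop (and the deprecated set_task_state() call) with a wait queue plus a predicate function, so the thread can only sleep when there is genuinely nothing to do. A userspace model of the same pattern using a condition variable; all names are illustrative, not the kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gc_wait = PTHREAD_COND_INITIALIZER;
static bool should_run, should_stop;

static void *gc_thread(void *arg)
{
	pthread_mutex_lock(&lock);
	while (1) {
		/* wait_event(gc_wait, should_stop || gc_should_run()) */
		while (!(should_stop || should_run))
			pthread_cond_wait(&gc_wait, &lock);
		if (should_stop)
			break;
		should_run = false;
		printf("running gc\n");	/* bch_btree_gc() stand-in */
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

static void wake_up_gc(bool stop)
{
	pthread_mutex_lock(&lock);
	if (stop)
		should_stop = true;
	else
		should_run = true;
	pthread_cond_signal(&gc_wait);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, gc_thread, NULL);
	wake_up_gc(false);	/* trigger one gc pass */
	wake_up_gc(true);	/* then ask the thread to exit */
	pthread_join(t, NULL);
	return 0;
}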
drivers/md/bcache/btree.h
@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
 
 static inline void wake_up_gc(struct cache_set *c)
 {
-	if (c->gc_thread)
-		wake_up_process(c->gc_thread);
+	wake_up(&c->gc_wait);
 }
 
 #define MAP_DONE	0
drivers/md/bcache/request.c
@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
 	struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
 	struct bio *bio = op->bio, *n;
 
-	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
-		set_gc_sectors(op->c);
+	if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
 		wake_up_gc(op->c);
-	}
 
 	if (op->bypass)
 		return bch_data_invalidate(cl);
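On the submission side, the sector budget in sectors_to_gc alone decides whether to poke the gc thread; resetting the budget now happens in the gc thread itself. A small C11 model of this handshake, with hypothetical names and numbers:

#include <stdatomic.h>
#include <stdio.h>

/* Model of the sectors_to_gc budget: the submit path subtracts each
 * bio's sectors; once the budget goes negative every insert nudges
 * the gc thread, which resets the budget when it runs. */
static atomic_int sectors_to_gc;

static void set_gc_sectors(int budget)		/* called by the gc thread */
{
	atomic_store(&sectors_to_gc, budget);
}

static void data_insert(int bio_sectors)
{
	/* atomic_fetch_sub returns the old value, so subtract again
	 * to model the kernel's atomic_sub_return() */
	if (atomic_fetch_sub(&sectors_to_gc, bio_sectors) - bio_sectors < 0)
		printf("wake_up_gc()\n");	/* wait-queue stand-in */
}

int main(void)
{
	set_gc_sectors(1000);	/* hypothetical budget */
	data_insert(600);	/* 400 left: no wakeup */
	data_insert(600);	/* crosses zero: gc woken */
	return 0;
}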
drivers/md/bcache/super.c
@@ -58,6 +58,7 @@ static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
 
 #define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
+#define BCACHE_MINORS		16 /* partition support */
 
 /* Superblock */
@@ -783,8 +784,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
 	if (minor < 0)
 		return minor;
 
+	minor *= BCACHE_MINORS;
+
 	if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-	    !(d->disk = alloc_disk(1))) {
+	    !(d->disk = alloc_disk(BCACHE_MINORS))) {
 		ida_simple_remove(&bcache_minor, minor);
 		return -ENOMEM;
 	}
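Scaling the ida slot by BCACHE_MINORS and passing BCACHE_MINORS to alloc_disk() gives each bcacheN device a contiguous range of 16 minors, so the block core can assign minors to partitions such as bcache0p1. A sketch of the arithmetic, with an illustrative helper name:

#include <stdio.h>

#define BCACHE_MINORS 16	/* from the patch: minors per bcacheN */

/* partno 0 is the whole disk; partitions follow within the range */
static int bcache_minor(int ida_slot, int partno)
{
	return ida_slot * BCACHE_MINORS + partno;
}

int main(void)
{
	printf("bcache0   -> minor %d\n", bcache_minor(0, 0));	/* 0 */
	printf("bcache0p1 -> minor %d\n", bcache_minor(0, 1));	/* 1 */
	printf("bcache1   -> minor %d\n", bcache_minor(1, 0));	/* 16 */
	return 0;
}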
@@ -1489,6 +1492,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 	mutex_init(&c->bucket_lock);
 	init_waitqueue_head(&c->btree_cache_wait);
 	init_waitqueue_head(&c->bucket_wait);
+	init_waitqueue_head(&c->gc_wait);
 	sema_init(&c->uuid_write_mutex, 1);
 
 	spin_lock_init(&c->btree_gc_time.lock);
@@ -1548,6 +1552,7 @@ static void run_cache_set(struct cache_set *c)
 
 	for_each_cache(ca, c, i)
 		c->nbuckets += ca->sb.nbuckets;
+	set_gc_sectors(c);
 
 	if (CACHE_SYNC(&c->sb)) {
 		LIST_HEAD(journal);
drivers/nvme/host/pci.c
@@ -50,7 +50,7 @@
 #define NVME_AQ_DEPTH		256
 #define SQ_SIZE(depth)		(depth * sizeof(struct nvme_command))
 #define CQ_SIZE(depth)		(depth * sizeof(struct nvme_completion))
-		
+
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.

(the changed line above is whitespace-only: trailing tabs dropped from the blank line)
@@ -1349,7 +1349,7 @@ static ssize_t nvme_cmb_show(struct device *dev,
 {
 	struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
 
-	return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
+	return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
 		       ndev->cmbloc, ndev->cmbsz);
 }
 static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
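The fix matters because snprintf() returns the length the output would have had, which can exceed the buffer and is not a valid sysfs show() return value; scnprintf() returns the bytes actually stored. A userspace model of the difference, where my_scnprintf is a stand-in for the kernel helper:

#include <stdarg.h>
#include <stdio.h>

/* Clamp the return value to what was actually written, unlike
 * snprintf(), which reports the would-be length of the output. */
static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	if (i >= (int)size)
		i = size ? (int)size - 1 : 0;
	return i;
}

int main(void)
{
	char buf[8];

	/* snprintf: returns 19, more than the 8-byte buffer holds */
	printf("%d\n", snprintf(buf, sizeof(buf), "cmbloc : x%08x\n", 0));
	/* scnprintf-style: returns 7, safe as a show() result */
	printf("%d\n", my_scnprintf(buf, sizeof(buf), "cmbloc : x%08x\n", 0));
	return 0;
}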
include/linux/blkdev.h
@@ -288,7 +288,6 @@ enum blk_queue_state {
 struct blk_queue_tag {
 	struct request **tag_index;	/* map of busy tags */
 	unsigned long *tag_map;		/* bit map of free/busy tags */
-	int busy;			/* current depth */
 	int max_depth;			/* what we will send to device */
 	int real_max_depth;		/* what the array can hold */
 	atomic_t refcnt;		/* map can be shared */