mirror of https://gitee.com/openkylin/linux.git
for-5.11/block-2020-12-14
-----BEGIN PGP SIGNATURE-----

 iQJEBAABCAAuFiEEwPw5LcreJtl1+l5K99NY+ylx4KYFAl/Xec8QHGF4Ym9lQGtl
 cm5lbC5kawAKCRD301j7KXHgpoLbEACzXypgZWwMdfgRckA/Vt333rXHtbhUV+hK
 2XP+P81iRvr9Esi31UPbRp82vrgcDO0cpI1QmQojS5U5TIQP88BfXptfRZZu48eb
 wT5RDDNQ34HItqAh/yEuYsv9yUKcxeIrB99tBVvM+4UmQg9zTdIW3mg6PvCBdbhV
 N38jI0tCF/PJatjfRuphT/nXonQLPWBlVDmZk06KZQFOwQe9ep1vUi1+nbiRPuo3
 geFBpTh1Kp6Vl1B3n4RpECs6Y7I0RRuJdaH2sDizICla1/BW91F9fQwHimNnUxUq
 e1Q1kMuh6ftcQGkYlHSYcPhuv6CvorldTZCO5arPxWpcwvxriTSMRPWAgUr5pEiF
 fhiGhqeDu9e6vl9vS31wUD1B30hy+jFz9wyjRrDwJ3cPHH1JVBjTzvdX+cIh/1ku
 IbIwUMteUtvUrzqAv/DzbGhedp7xWtOFaVo8j0QFYh9zkjd6b8yDOF/yztwX2gjY
 Xt1cd+KpDSiN449ZRaoMI0sCJAxqzhMa6nsWlb0L7KuNyWKAbvKQBm9Rb47FLV9A
 Vx70KC+zkFoyw23capvIahmQazerriUJ5PGe0lVm6ROgmIFdCpXTPDjnrvq/6RZ/
 GEpD7gTW9atGJ7EuEE8686sAfKD5kneChWLX5EHXf0d0AG5Mr2lKsluiGp5LpPJg
 Q1Xqs6xwww==
 =zo4w
-----END PGP SIGNATURE-----

Merge tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block

Pull block updates from Jens Axboe:
 "Another series of killing more code than what is being added, again
  thanks to Christoph's relentless cleanups and tech debt tackling.

  This contains:

   - blk-iocost improvements (Baolin Wang)
   - part0 iostat fix (Jeffle Xu)
   - Disable iopoll for split bios (Jeffle Xu)
   - block tracepoint cleanups (Christoph Hellwig)
   - Merging of struct block_device and hd_struct (Christoph Hellwig)
   - Rework/cleanup of how block device sizes are updated (Christoph Hellwig)
   - Simplification of gendisk lookup and removal of block device
     aliasing (Christoph Hellwig)
   - Block device ioctl cleanups (Christoph Hellwig)
   - Removal of bdget()/blkdev_get() as exported API (Christoph Hellwig)
   - Disk change rework, avoid ->revalidate_disk() (Christoph Hellwig)
   - sbitmap improvements (Pavel Begunkov)
   - Hybrid polling fix (Pavel Begunkov)
   - bvec iteration improvements (Pavel Begunkov)
   - Zone revalidation fixes (Damien Le Moal)
   - blk-throttle limit fix (Yu Kuai)
   - Various little fixes"

* tag 'for-5.11/block-2020-12-14' of git://git.kernel.dk/linux-block: (126 commits)
  blk-mq: fix msec comment from micro to milli seconds
  blk-mq: update arg in comment of blk_mq_map_queue
  blk-mq: add helper allocating tagset->tags
  Revert "block: Fix a lockdep complaint triggered by request queue flushing"
  nvme-loop: use blk_mq_hctx_set_fq_lock_class to set loop's lock class
  blk-mq: add new API of blk_mq_hctx_set_fq_lock_class
  block: disable iopoll for split bio
  block: Improve blk_revalidate_disk_zones() checks
  sbitmap: simplify wrap check
  sbitmap: replace CAS with atomic and
  sbitmap: remove swap_lock
  sbitmap: optimise sbitmap_deferred_clear()
  blk-mq: skip hybrid polling if iopoll doesn't spin
  blk-iocost: Factor out the base vrate change into a separate function
  blk-iocost: Factor out the active iocgs' state check into a separate function
  blk-iocost: Move the usage ratio calculation to the correct place
  blk-iocost: Remove unnecessary advance declaration
  blk-iocost: Fix some typos in comments
  blktrace: fix up a kerneldoc comment
  block: remove the request_queue to argument request based tracepoints
  ...
This commit is contained in: commit ac7ac4618c
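Before the per-file hunks, a hedged illustration (not code from this commit; the function names are invented for the sketch): the central pattern of the hd_struct/block_device merge that most hunks below repeat, replacing hd_struct fields with their bd_-prefixed block_device equivalents.

    /* before: partition metadata lived in a dedicated struct hd_struct */
    static sector_t part_capacity_old(struct hd_struct *part)
    {
        return part_nr_sects_read(part);   /* seqcount-guarded nr_sects */
    }

    /* after: a partition is simply a struct block_device */
    static sector_t part_capacity_new(struct block_device *part)
    {
        return bdev_nr_sectors(part);      /* size comes from the bdev inode */
    }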
block/bio.c (10 changed lines):

@@ -608,13 +608,13 @@ void bio_truncate(struct bio *bio, unsigned new_size)
 
 void guard_bio_eod(struct bio *bio)
 {
     sector_t maxsector;
-    struct hd_struct *part;
+    struct block_device *part;
 
     rcu_read_lock();
     part = __disk_get_part(bio->bi_disk, bio->bi_partno);
     if (part)
-        maxsector = part_nr_sects_read(part);
+        maxsector = bdev_nr_sectors(part);
     else
         maxsector = get_capacity(bio->bi_disk);
     rcu_read_unlock();

@@ -1212,8 +1212,8 @@ void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
 
         flush_dcache_page(dst_bv.bv_page);
 
-        bio_advance_iter(src, src_iter, bytes);
-        bio_advance_iter(dst, dst_iter, bytes);
+        bio_advance_iter_single(src, src_iter, bytes);
+        bio_advance_iter_single(dst, dst_iter, bytes);
     }
 }
 EXPORT_SYMBOL(bio_copy_data_iter);
block/blk-cgroup.c:

@@ -556,22 +556,22 @@ static struct blkcg_gq *blkg_lookup_check(struct blkcg *blkcg,
 }
 
 /**
- * blkg_conf_prep - parse and prepare for per-blkg config update
+ * blkcg_conf_open_bdev - parse and open bdev for per-blkg config update
  * @inputp: input string pointer
  *
  * Parse the device node prefix part, MAJ:MIN, of per-blkg config update
- * from @input and get and return the matching gendisk. *@inputp is
+ * from @input and get and return the matching bdev. *@inputp is
  * updated to point past the device node prefix. Returns an ERR_PTR()
  * value on error.
  *
  * Use this function iff blkg_conf_prep() can't be used for some reason.
  */
-struct gendisk *blkcg_conf_get_disk(char **inputp)
+struct block_device *blkcg_conf_open_bdev(char **inputp)
 {
     char *input = *inputp;
     unsigned int major, minor;
-    struct gendisk *disk;
-    int key_len, part;
+    struct block_device *bdev;
+    int key_len;
 
     if (sscanf(input, "%u:%u%n", &major, &minor, &key_len) != 2)
         return ERR_PTR(-EINVAL);

@@ -581,16 +581,16 @@ struct gendisk *blkcg_conf_get_disk(char **inputp)
         return ERR_PTR(-EINVAL);
     input = skip_spaces(input);
 
-    disk = get_gendisk(MKDEV(major, minor), &part);
-    if (!disk)
+    bdev = blkdev_get_no_open(MKDEV(major, minor));
+    if (!bdev)
         return ERR_PTR(-ENODEV);
-    if (part) {
-        put_disk_and_module(disk);
+    if (bdev_is_partition(bdev)) {
+        blkdev_put_no_open(bdev);
         return ERR_PTR(-ENODEV);
     }
 
     *inputp = input;
-    return disk;
+    return bdev;
 }
 
 /**

@@ -607,18 +607,18 @@ struct gendisk *blkcg_conf_get_disk(char **inputp)
  */
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
            char *input, struct blkg_conf_ctx *ctx)
-    __acquires(rcu) __acquires(&disk->queue->queue_lock)
+    __acquires(rcu) __acquires(&bdev->bd_disk->queue->queue_lock)
 {
-    struct gendisk *disk;
+    struct block_device *bdev;
     struct request_queue *q;
     struct blkcg_gq *blkg;
     int ret;
 
-    disk = blkcg_conf_get_disk(&input);
-    if (IS_ERR(disk))
-        return PTR_ERR(disk);
+    bdev = blkcg_conf_open_bdev(&input);
+    if (IS_ERR(bdev))
+        return PTR_ERR(bdev);
 
-    q = disk->queue;
+    q = bdev->bd_disk->queue;
 
     rcu_read_lock();
     spin_lock_irq(&q->queue_lock);

@@ -689,7 +689,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
             goto success;
     }
 success:
-    ctx->disk = disk;
+    ctx->bdev = bdev;
     ctx->blkg = blkg;
     ctx->body = input;
     return 0;

@@ -700,7 +700,7 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
     spin_unlock_irq(&q->queue_lock);
     rcu_read_unlock();
 fail:
-    put_disk_and_module(disk);
+    blkdev_put_no_open(bdev);
     /*
      * If queue was bypassing, we should retry. Do so after a
      * short msleep(). It isn't strictly necessary but queue

@@ -723,11 +723,11 @@ EXPORT_SYMBOL_GPL(blkg_conf_prep);
  * with blkg_conf_prep().
  */
 void blkg_conf_finish(struct blkg_conf_ctx *ctx)
-    __releases(&ctx->disk->queue->queue_lock) __releases(rcu)
+    __releases(&ctx->bdev->bd_disk->queue->queue_lock) __releases(rcu)
 {
-    spin_unlock_irq(&ctx->disk->queue->queue_lock);
+    spin_unlock_irq(&ctx->bdev->bd_disk->queue->queue_lock);
     rcu_read_unlock();
-    put_disk_and_module(ctx->disk);
+    blkdev_put_no_open(ctx->bdev);
 }
 EXPORT_SYMBOL_GPL(blkg_conf_finish);

@@ -820,9 +820,9 @@ static void blkcg_fill_root_iostats(void)
 
     class_dev_iter_init(&iter, &block_class, NULL, &disk_type);
     while ((dev = class_dev_iter_next(&iter))) {
-        struct gendisk *disk = dev_to_disk(dev);
-        struct hd_struct *part = disk_get_part(disk, 0);
-        struct blkcg_gq *blkg = blk_queue_root_blkg(disk->queue);
+        struct block_device *bdev = dev_to_bdev(dev);
+        struct blkcg_gq *blkg =
+            blk_queue_root_blkg(bdev->bd_disk->queue);
         struct blkg_iostat tmp;
         int cpu;

@@ -830,7 +830,7 @@ static void blkcg_fill_root_iostats(void)
         for_each_possible_cpu(cpu) {
             struct disk_stats *cpu_dkstats;
 
-            cpu_dkstats = per_cpu_ptr(part->dkstats, cpu);
+            cpu_dkstats = per_cpu_ptr(bdev->bd_stats, cpu);
             tmp.ios[BLKG_IOSTAT_READ] +=
                 cpu_dkstats->ios[STAT_READ];
             tmp.ios[BLKG_IOSTAT_WRITE] +=

@@ -849,7 +849,6 @@ static void blkcg_fill_root_iostats(void)
             blkg_iostat_set(&blkg->iostat.cur, &tmp);
             u64_stats_update_end(&blkg->iostat.sync);
         }
-        disk_put_part(part);
     }
 }
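A hedged usage sketch (the surrounding policy code is invented; the prep/finish pairing and the "MAJ:MIN body" input format follow from the functions above): a cgroup policy lets blkg_conf_prep() consume the device prefix, then works on ctx.blkg and ctx.body.

    struct blkg_conf_ctx ctx;
    int ret;

    ret = blkg_conf_prep(blkcg, pol, input, &ctx);  /* consumes "8:0 ..." */
    if (ret)
        return ret;

    /* ... parse ctx.body and update ctx.blkg under queue_lock ... */

    blkg_conf_finish(&ctx);  /* unlocks rcu/queue_lock, now drops the bdev ref */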
block/blk-core.c:

@@ -666,9 +666,9 @@ static int __init setup_fail_make_request(char *str)
 }
 __setup("fail_make_request=", setup_fail_make_request);
 
-static bool should_fail_request(struct hd_struct *part, unsigned int bytes)
+static bool should_fail_request(struct block_device *part, unsigned int bytes)
 {
-    return part->make_it_fail && should_fail(&fail_make_request, bytes);
+    return part->bd_make_it_fail && should_fail(&fail_make_request, bytes);
 }
 
 static int __init fail_make_request_debugfs(void)

@@ -683,7 +683,7 @@ late_initcall(fail_make_request_debugfs);
 
 #else /* CONFIG_FAIL_MAKE_REQUEST */
 
-static inline bool should_fail_request(struct hd_struct *part,
+static inline bool should_fail_request(struct block_device *part,
                     unsigned int bytes)
 {
     return false;

@@ -691,11 +691,11 @@ static inline bool should_fail_request(struct hd_struct *part,
 
 #endif /* CONFIG_FAIL_MAKE_REQUEST */
 
-static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
+static inline bool bio_check_ro(struct bio *bio, struct block_device *part)
 {
     const int op = bio_op(bio);
 
-    if (part->policy && op_is_write(op)) {
+    if (part->bd_read_only && op_is_write(op)) {
         char b[BDEVNAME_SIZE];
 
         if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))

@@ -703,7 +703,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
 
         WARN_ONCE(1,
                "Trying to write to read-only block-device %s (partno %d)\n",
-            bio_devname(bio, b), part->partno);
+            bio_devname(bio, b), part->bd_partno);
         /* Older lvm-tools actually trigger this */
         return false;
     }

@@ -713,7 +713,7 @@ static inline bool bio_check_ro(struct bio *bio, struct hd_struct *part)
 
 static noinline int should_fail_bio(struct bio *bio)
 {
-    if (should_fail_request(&bio->bi_disk->part0, bio->bi_iter.bi_size))
+    if (should_fail_request(bio->bi_disk->part0, bio->bi_iter.bi_size))
         return -EIO;
     return 0;
 }

@@ -742,7 +742,7 @@ static inline int bio_check_eod(struct bio *bio, sector_t maxsector)
  */
 static inline int blk_partition_remap(struct bio *bio)
 {
-    struct hd_struct *p;
+    struct block_device *p;
     int ret = -EIO;
 
     rcu_read_lock();

@@ -755,11 +755,12 @@ static inline int blk_partition_remap(struct bio *bio)
         goto out;
 
     if (bio_sectors(bio)) {
-        if (bio_check_eod(bio, part_nr_sects_read(p)))
+        if (bio_check_eod(bio, bdev_nr_sectors(p)))
             goto out;
-        bio->bi_iter.bi_sector += p->start_sect;
-        trace_block_bio_remap(bio->bi_disk->queue, bio, part_devt(p),
-                      bio->bi_iter.bi_sector - p->start_sect);
+        bio->bi_iter.bi_sector += p->bd_start_sect;
+        trace_block_bio_remap(bio, p->bd_dev,
+                      bio->bi_iter.bi_sector -
+                      p->bd_start_sect);
     }
     bio->bi_partno = 0;
     ret = 0;

@@ -829,7 +830,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
         if (unlikely(blk_partition_remap(bio)))
             goto end_io;
     } else {
-        if (unlikely(bio_check_ro(bio, &bio->bi_disk->part0)))
+        if (unlikely(bio_check_ro(bio, bio->bi_disk->part0)))
             goto end_io;
         if (unlikely(bio_check_eod(bio, get_capacity(bio->bi_disk))))
             goto end_io;

@@ -906,7 +907,7 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
     blkcg_bio_issue_init(bio);
 
     if (!bio_flagged(bio, BIO_TRACE_COMPLETION)) {
-        trace_block_bio_queue(q, bio);
+        trace_block_bio_queue(bio);
         /* Now that enqueuing has been traced, we need to trace
          * completion as well.
          */

@@ -1201,7 +1202,7 @@ blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *
         return ret;
 
     if (rq->rq_disk &&
-        should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
+        should_fail_request(rq->rq_disk->part0, blk_rq_bytes(rq)))
         return BLK_STS_IOERR;
 
     if (blk_crypto_insert_cloned_request(rq))

@@ -1260,17 +1261,18 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
 }
 EXPORT_SYMBOL_GPL(blk_rq_err_bytes);
 
-static void update_io_ticks(struct hd_struct *part, unsigned long now, bool end)
+static void update_io_ticks(struct block_device *part, unsigned long now,
+        bool end)
 {
     unsigned long stamp;
 again:
-    stamp = READ_ONCE(part->stamp);
+    stamp = READ_ONCE(part->bd_stamp);
     if (unlikely(stamp != now)) {
-        if (likely(cmpxchg(&part->stamp, stamp, now) == stamp))
+        if (likely(cmpxchg(&part->bd_stamp, stamp, now) == stamp))
             __part_stat_add(part, io_ticks, end ? now - stamp : 1);
     }
-    if (part->partno) {
-        part = &part_to_disk(part)->part0;
+    if (part->bd_partno) {
+        part = bdev_whole(part);
         goto again;
     }
 }

@@ -1279,11 +1281,9 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
     if (req->part && blk_do_io_stat(req)) {
         const int sgrp = op_stat_group(req_op(req));
-        struct hd_struct *part;
 
         part_stat_lock();
-        part = req->part;
-        part_stat_add(part, sectors[sgrp], bytes >> 9);
+        part_stat_add(req->part, sectors[sgrp], bytes >> 9);
         part_stat_unlock();
     }
 }

@@ -1298,17 +1298,12 @@ void blk_account_io_done(struct request *req, u64 now)
     if (req->part && blk_do_io_stat(req) &&
         !(req->rq_flags & RQF_FLUSH_SEQ)) {
         const int sgrp = op_stat_group(req_op(req));
-        struct hd_struct *part;
 
         part_stat_lock();
-        part = req->part;
-
-        update_io_ticks(part, jiffies, true);
-        part_stat_inc(part, ios[sgrp]);
-        part_stat_add(part, nsecs[sgrp], now - req->start_time_ns);
+        update_io_ticks(req->part, jiffies, true);
+        part_stat_inc(req->part, ios[sgrp]);
+        part_stat_add(req->part, nsecs[sgrp], now - req->start_time_ns);
         part_stat_unlock();
-
-        hd_struct_put(part);
     }
 }

@@ -1324,7 +1319,7 @@ void blk_account_io_start(struct request *rq)
     part_stat_unlock();
 }
 
-static unsigned long __part_start_io_acct(struct hd_struct *part,
+static unsigned long __part_start_io_acct(struct block_device *part,
                       unsigned int sectors, unsigned int op)
 {
     const int sgrp = op_stat_group(op);

@@ -1340,7 +1335,7 @@ static unsigned long __part_start_io_acct(struct hd_struct *part,
     return now;
 }
 
-unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
+unsigned long part_start_io_acct(struct gendisk *disk, struct block_device **part,
                  struct bio *bio)
 {
     *part = disk_map_sector_rcu(disk, bio->bi_iter.bi_sector);

@@ -1352,11 +1347,11 @@ EXPORT_SYMBOL_GPL(part_start_io_acct);
 unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
                  unsigned int op)
 {
-    return __part_start_io_acct(&disk->part0, sectors, op);
+    return __part_start_io_acct(disk->part0, sectors, op);
 }
 EXPORT_SYMBOL(disk_start_io_acct);
 
-static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
+static void __part_end_io_acct(struct block_device *part, unsigned int op,
                    unsigned long start_time)
 {
     const int sgrp = op_stat_group(op);

@@ -1370,18 +1365,17 @@ static void __part_end_io_acct(struct hd_struct *part, unsigned int op,
     part_stat_unlock();
 }
 
-void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+void part_end_io_acct(struct block_device *part, struct bio *bio,
               unsigned long start_time)
 {
     __part_end_io_acct(part, bio_op(bio), start_time);
-    hd_struct_put(part);
 }
 EXPORT_SYMBOL_GPL(part_end_io_acct);
 
 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
               unsigned long start_time)
 {
-    __part_end_io_acct(&disk->part0, op, start_time);
+    __part_end_io_acct(disk->part0, op, start_time);
 }
 EXPORT_SYMBOL(disk_end_io_acct);
block/blk-flush.c:

@@ -69,7 +69,6 @@
 #include <linux/blkdev.h>
 #include <linux/gfp.h>
 #include <linux/blk-mq.h>
-#include <linux/lockdep.h>
 
 #include "blk.h"
 #include "blk-mq.h"

@@ -139,7 +138,7 @@ static void blk_flush_queue_rq(struct request *rq, bool add_front)
 
 static void blk_account_io_flush(struct request *rq)
 {
-    struct hd_struct *part = &rq->rq_disk->part0;
+    struct block_device *part = rq->rq_disk->part0;
 
     part_stat_lock();
     part_stat_inc(part, ios[STAT_FLUSH]);

@@ -474,9 +473,6 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
     INIT_LIST_HEAD(&fq->flush_queue[1]);
     INIT_LIST_HEAD(&fq->flush_data_in_flight);
 
-    lockdep_register_key(&fq->key);
-    lockdep_set_class(&fq->mq_flush_lock, &fq->key);
-
     return fq;
 
  fail_rq:

@@ -491,7 +487,31 @@ void blk_free_flush_queue(struct blk_flush_queue *fq)
     if (!fq)
         return;
 
-    lockdep_unregister_key(&fq->key);
     kfree(fq->flush_rq);
     kfree(fq);
 }
 
+/*
+ * Allow driver to set its own lock class to fq->mq_flush_lock for
+ * avoiding lockdep complaint.
+ *
+ * flush_end_io() may be called recursively from some driver, such as
+ * nvme-loop, so lockdep may complain 'possible recursive locking' because
+ * all 'struct blk_flush_queue' instance share same mq_flush_lock lock class
+ * key. We need to assign different lock class for these driver's
+ * fq->mq_flush_lock for avoiding the lockdep warning.
+ *
+ * Use dynamically allocated lock class key for each 'blk_flush_queue'
+ * instance is over-kill, and more worse it introduces horrible boot delay
+ * issue because synchronize_rcu() is implied in lockdep_unregister_key which
+ * is called for each hctx release. SCSI probing may synchronously create and
+ * destroy lots of MQ request_queues for non-existent devices, and some robot
+ * test kernel always enable lockdep option. It is observed that more than half
+ * an hour is taken during SCSI MQ probe with per-fq lock class.
+ */
+void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
+        struct lock_class_key *key)
+{
+    lockdep_set_class(&hctx->fq->mq_flush_lock, key);
+}
+EXPORT_SYMBOL_GPL(blk_mq_hctx_set_fq_lock_class);
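A sketch of how a driver is expected to use the new export, modeled on the nvme-loop case named in the comment above (the callback and key names are illustrative; the hook shape is the stock blk-mq init_hctx callback):

    static struct lock_class_key loop_hctx_fq_lock_key;

    static int loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                              unsigned int hctx_idx)
    {
        /* give this driver's flush queues their own lockdep class */
        blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);
        return 0;
    }

One static key per driver keeps lockdep happy about the recursive flush without paying the per-fq dynamic-key registration cost the comment warns about.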
block/blk-iocost.c:

@@ -39,7 +39,7 @@
  * On top of that, a size cost proportional to the length of the IO is
  * added. While simple, this model captures the operational
  * characteristics of a wide varienty of devices well enough. Default
- * paramters for several different classes of devices are provided and the
+ * parameters for several different classes of devices are provided and the
  * parameters can be configured from userspace via
  * /sys/fs/cgroup/io.cost.model.
  *

@@ -77,7 +77,7 @@
  *
  * This constitutes the basis of IO capacity distribution. Each cgroup's
  * vtime is running at a rate determined by its hweight. A cgroup tracks
- * the vtime consumed by past IOs and can issue a new IO iff doing so
+ * the vtime consumed by past IOs and can issue a new IO if doing so
  * wouldn't outrun the current device vtime. Otherwise, the IO is
  * suspended until the vtime has progressed enough to cover it.
  *

@@ -155,7 +155,7 @@
  * Instead of debugfs or other clumsy monitoring mechanisms, this
  * controller uses a drgn based monitoring script -
  * tools/cgroup/iocost_monitor.py. For details on drgn, please see
- * https://github.com/osandov/drgn. The ouput looks like the following.
+ * https://github.com/osandov/drgn. The output looks like the following.
  *
  *  sdb RUN per=300ms cur_per=234.218:v203.695 busy= +1 vrate= 62.12%
  *      active weight hweight% inflt% dbt delay usages%

@@ -370,8 +370,6 @@ enum {
     AUTOP_SSD_FAST,
 };
 
-struct ioc_gq;
-
 struct ioc_params {
     u32 qos[NR_QOS_PARAMS];
     u64 i_lcoefs[NR_I_LCOEFS];

@@ -492,7 +490,7 @@ struct ioc_gq {
     /*
      * `vtime` is this iocg's vtime cursor which progresses as IOs are
      * issued. If lagging behind device vtime, the delta represents
-     * the currently available IO budget. If runnning ahead, the
+     * the currently available IO budget. If running ahead, the
      * overage.
      *
      * `vtime_done` is the same but progressed on completion rather

@@ -973,6 +971,58 @@ static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now)
     ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod);
 }
 
+static void ioc_adjust_base_vrate(struct ioc *ioc, u32 rq_wait_pct,
+                  int nr_lagging, int nr_shortages,
+                  int prev_busy_level, u32 *missed_ppm)
+{
+    u64 vrate = ioc->vtime_base_rate;
+    u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
+
+    if (!ioc->busy_level || (ioc->busy_level < 0 && nr_lagging)) {
+        if (ioc->busy_level != prev_busy_level || nr_lagging)
+            trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
+                           missed_ppm, rq_wait_pct,
+                           nr_lagging, nr_shortages);
+
+        return;
+    }
+
+    /* rq_wait signal is always reliable, ignore user vrate_min */
+    if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
+        vrate_min = VRATE_MIN;
+
+    /*
+     * If vrate is out of bounds, apply clamp gradually as the
+     * bounds can change abruptly. Otherwise, apply busy_level
+     * based adjustment.
+     */
+    if (vrate < vrate_min) {
+        vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT), 100);
+        vrate = min(vrate, vrate_min);
+    } else if (vrate > vrate_max) {
+        vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT), 100);
+        vrate = max(vrate, vrate_max);
+    } else {
+        int idx = min_t(int, abs(ioc->busy_level),
+                ARRAY_SIZE(vrate_adj_pct) - 1);
+        u32 adj_pct = vrate_adj_pct[idx];
+
+        if (ioc->busy_level > 0)
+            adj_pct = 100 - adj_pct;
+        else
+            adj_pct = 100 + adj_pct;
+
+        vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
+                  vrate_min, vrate_max);
+    }
+
+    trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
+                   nr_lagging, nr_shortages);
+
+    ioc->vtime_base_rate = vrate;
+    ioc_refresh_margins(ioc);
+}
+
 /* take a snapshot of the current [v]time and vrate */
 static void ioc_now(struct ioc *ioc, struct ioc_now *now)
 {

@@ -1046,7 +1096,7 @@ static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse,
 
     /*
      * The delta between inuse and active sums indicates that
-     * that much of weight is being given away. Parent's inuse
+     * much of weight is being given away. Parent's inuse
      * and active should reflect the ratio.
      */
     if (parent->child_active_sum) {

@@ -2071,13 +2121,88 @@ static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors,
     }
 }
 
+/*
+ * Check the active iocgs' state to avoid oversleeping and deactive
+ * idle iocgs.
+ *
+ * Since waiters determine the sleep durations based on the vrate
+ * they saw at the time of sleep, if vrate has increased, some
+ * waiters could be sleeping for too long. Wake up tardy waiters
+ * which should have woken up in the last period and expire idle
+ * iocgs.
+ */
+static int ioc_check_iocgs(struct ioc *ioc, struct ioc_now *now)
+{
+    int nr_debtors = 0;
+    struct ioc_gq *iocg, *tiocg;
+
+    list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
+        if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
+            !iocg->delay && !iocg_is_idle(iocg))
+            continue;
+
+        spin_lock(&iocg->waitq.lock);
+
+        /* flush wait and indebt stat deltas */
+        if (iocg->wait_since) {
+            iocg->local_stat.wait_us += now->now - iocg->wait_since;
+            iocg->wait_since = now->now;
+        }
+        if (iocg->indebt_since) {
+            iocg->local_stat.indebt_us +=
+                now->now - iocg->indebt_since;
+            iocg->indebt_since = now->now;
+        }
+        if (iocg->indelay_since) {
+            iocg->local_stat.indelay_us +=
+                now->now - iocg->indelay_since;
+            iocg->indelay_since = now->now;
+        }
+
+        if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
+            iocg->delay) {
+            /* might be oversleeping vtime / hweight changes, kick */
+            iocg_kick_waitq(iocg, true, now);
+            if (iocg->abs_vdebt || iocg->delay)
+                nr_debtors++;
+        } else if (iocg_is_idle(iocg)) {
+            /* no waiter and idle, deactivate */
+            u64 vtime = atomic64_read(&iocg->vtime);
+            s64 excess;
+
+            /*
+             * @iocg has been inactive for a full duration and will
+             * have a high budget. Account anything above target as
+             * error and throw away. On reactivation, it'll start
+             * with the target budget.
+             */
+            excess = now->vnow - vtime - ioc->margins.target;
+            if (excess > 0) {
+                u32 old_hwi;
+
+                current_hweight(iocg, NULL, &old_hwi);
+                ioc->vtime_err -= div64_u64(excess * old_hwi,
+                                WEIGHT_ONE);
+            }
+
+            __propagate_weights(iocg, 0, 0, false, now);
+            list_del_init(&iocg->active_list);
+        }
+
+        spin_unlock(&iocg->waitq.lock);
+    }
+
+    commit_weights(ioc);
+    return nr_debtors;
+}
+
 static void ioc_timer_fn(struct timer_list *timer)
 {
     struct ioc *ioc = container_of(timer, struct ioc, timer);
     struct ioc_gq *iocg, *tiocg;
     struct ioc_now now;
     LIST_HEAD(surpluses);
-    int nr_debtors = 0, nr_shortages = 0, nr_lagging = 0;
+    int nr_debtors, nr_shortages = 0, nr_lagging = 0;
     u64 usage_us_sum = 0;
     u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM];
     u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM];

@@ -2099,68 +2224,7 @@ static void ioc_timer_fn(struct timer_list *timer)
         return;
     }
 
-    /*
-     * Waiters determine the sleep durations based on the vrate they
-     * saw at the time of sleep. If vrate has increased, some waiters
-     * could be sleeping for too long. Wake up tardy waiters which
-     * should have woken up in the last period and expire idle iocgs.
-     */
-    list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) {
-        if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt &&
-            !iocg->delay && !iocg_is_idle(iocg))
-            continue;
-
-        spin_lock(&iocg->waitq.lock);
-
-        /* flush wait and indebt stat deltas */
-        if (iocg->wait_since) {
-            iocg->local_stat.wait_us += now.now - iocg->wait_since;
-            iocg->wait_since = now.now;
-        }
-        if (iocg->indebt_since) {
-            iocg->local_stat.indebt_us +=
-                now.now - iocg->indebt_since;
-            iocg->indebt_since = now.now;
-        }
-        if (iocg->indelay_since) {
-            iocg->local_stat.indelay_us +=
-                now.now - iocg->indelay_since;
-            iocg->indelay_since = now.now;
-        }
-
-        if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt ||
-            iocg->delay) {
-            /* might be oversleeping vtime / hweight changes, kick */
-            iocg_kick_waitq(iocg, true, &now);
-            if (iocg->abs_vdebt || iocg->delay)
-                nr_debtors++;
-        } else if (iocg_is_idle(iocg)) {
-            /* no waiter and idle, deactivate */
-            u64 vtime = atomic64_read(&iocg->vtime);
-            s64 excess;
-
-            /*
-             * @iocg has been inactive for a full duration and will
-             * have a high budget. Account anything above target as
-             * error and throw away. On reactivation, it'll start
-             * with the target budget.
-             */
-            excess = now.vnow - vtime - ioc->margins.target;
-            if (excess > 0) {
-                u32 old_hwi;
-
-                current_hweight(iocg, NULL, &old_hwi);
-                ioc->vtime_err -= div64_u64(excess * old_hwi,
-                                WEIGHT_ONE);
-            }
-
-            __propagate_weights(iocg, 0, 0, false, &now);
-            list_del_init(&iocg->active_list);
-        }
-
-        spin_unlock(&iocg->waitq.lock);
-    }
-    commit_weights(ioc);
+    nr_debtors = ioc_check_iocgs(ioc, &now);
 
     /*
      * Wait and indebt stat are flushed above and the donation calculation

@@ -2170,8 +2234,8 @@ static void ioc_timer_fn(struct timer_list *timer)
 
     /* calc usage and see whether some weights need to be moved around */
     list_for_each_entry(iocg, &ioc->active_iocgs, active_list) {
-        u64 vdone, vtime, usage_us, usage_dur;
-        u32 usage, hw_active, hw_inuse;
+        u64 vdone, vtime, usage_us;
+        u32 hw_active, hw_inuse;
 
         /*
          * Collect unused and wind vtime closer to vnow to prevent

@@ -2202,30 +2266,32 @@ static void ioc_timer_fn(struct timer_list *timer)
         usage_us = iocg->usage_delta_us;
         usage_us_sum += usage_us;
 
-        if (vdone != vtime) {
-            u64 inflight_us = DIV64_U64_ROUND_UP(
-                cost_to_abs_cost(vtime - vdone, hw_inuse),
-                ioc->vtime_base_rate);
-            usage_us = max(usage_us, inflight_us);
-        }
-
-        /* convert to hweight based usage ratio */
-        if (time_after64(iocg->activated_at, ioc->period_at))
-            usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
-        else
-            usage_dur = max_t(u64, now.now - ioc->period_at, 1);
-
-        usage = clamp_t(u32,
-                DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
-                           usage_dur),
-                1, WEIGHT_ONE);
-
         /* see whether there's surplus vtime */
         WARN_ON_ONCE(!list_empty(&iocg->surplus_list));
         if (hw_inuse < hw_active ||
             (!waitqueue_active(&iocg->waitq) &&
              time_before64(vtime, now.vnow - ioc->margins.low))) {
-            u32 hwa, old_hwi, hwm, new_hwi;
+            u32 hwa, old_hwi, hwm, new_hwi, usage;
+            u64 usage_dur;
+
+            if (vdone != vtime) {
+                u64 inflight_us = DIV64_U64_ROUND_UP(
+                    cost_to_abs_cost(vtime - vdone, hw_inuse),
+                    ioc->vtime_base_rate);
+
+                usage_us = max(usage_us, inflight_us);
+            }
+
+            /* convert to hweight based usage ratio */
+            if (time_after64(iocg->activated_at, ioc->period_at))
+                usage_dur = max_t(u64, now.now - iocg->activated_at, 1);
+            else
+                usage_dur = max_t(u64, now.now - ioc->period_at, 1);
+
+            usage = clamp_t(u32,
+                DIV64_U64_ROUND_UP(usage_us * WEIGHT_ONE,
+                           usage_dur),
+                1, WEIGHT_ONE);
 
             /*
              * Already donating or accumulated enough to start.

@@ -2309,51 +2375,8 @@ static void ioc_timer_fn(struct timer_list *timer)
 
     ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
 
-    if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
-        u64 vrate = ioc->vtime_base_rate;
-        u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
-
-        /* rq_wait signal is always reliable, ignore user vrate_min */
-        if (rq_wait_pct > RQ_WAIT_BUSY_PCT)
-            vrate_min = VRATE_MIN;
-
-        /*
-         * If vrate is out of bounds, apply clamp gradually as the
-         * bounds can change abruptly. Otherwise, apply busy_level
-         * based adjustment.
-         */
-        if (vrate < vrate_min) {
-            vrate = div64_u64(vrate * (100 + VRATE_CLAMP_ADJ_PCT),
-                      100);
-            vrate = min(vrate, vrate_min);
-        } else if (vrate > vrate_max) {
-            vrate = div64_u64(vrate * (100 - VRATE_CLAMP_ADJ_PCT),
-                      100);
-            vrate = max(vrate, vrate_max);
-        } else {
-            int idx = min_t(int, abs(ioc->busy_level),
-                    ARRAY_SIZE(vrate_adj_pct) - 1);
-            u32 adj_pct = vrate_adj_pct[idx];
-
-            if (ioc->busy_level > 0)
-                adj_pct = 100 - adj_pct;
-            else
-                adj_pct = 100 + adj_pct;
-
-            vrate = clamp(DIV64_U64_ROUND_UP(vrate * adj_pct, 100),
-                      vrate_min, vrate_max);
-        }
-
-        trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
-                       nr_lagging, nr_shortages);
-
-        ioc->vtime_base_rate = vrate;
-        ioc_refresh_margins(ioc);
-    } else if (ioc->busy_level != prev_busy_level || nr_lagging) {
-        trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
-                       missed_ppm, rq_wait_pct, nr_lagging,
-                       nr_shortages);
-    }
+    ioc_adjust_base_vrate(ioc, rq_wait_pct, nr_lagging, nr_shortages,
+                  prev_busy_level, missed_ppm);
 
     ioc_refresh_params(ioc, false);

@@ -2400,7 +2423,7 @@ static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime,
         return cost;
 
     /*
-     * We only increase inuse during period and do so iff the margin has
+     * We only increase inuse during period and do so if the margin has
      * deteriorated since the previous adjustment.
      */
     if (margin >= iocg->saved_margin || margin >= margins->low ||

@@ -3120,23 +3143,23 @@ static const match_table_t qos_tokens = {
 static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
                  size_t nbytes, loff_t off)
 {
-    struct gendisk *disk;
+    struct block_device *bdev;
     struct ioc *ioc;
     u32 qos[NR_QOS_PARAMS];
     bool enable, user;
     char *p;
     int ret;
 
-    disk = blkcg_conf_get_disk(&input);
-    if (IS_ERR(disk))
-        return PTR_ERR(disk);
+    bdev = blkcg_conf_open_bdev(&input);
+    if (IS_ERR(bdev))
+        return PTR_ERR(bdev);
 
-    ioc = q_to_ioc(disk->queue);
+    ioc = q_to_ioc(bdev->bd_disk->queue);
     if (!ioc) {
-        ret = blk_iocost_init(disk->queue);
+        ret = blk_iocost_init(bdev->bd_disk->queue);
         if (ret)
             goto err;
-        ioc = q_to_ioc(disk->queue);
+        ioc = q_to_ioc(bdev->bd_disk->queue);
     }
 
     spin_lock_irq(&ioc->lock);

@@ -3231,12 +3254,12 @@ static ssize_t ioc_qos_write(struct kernfs_open_file *of, char *input,
     ioc_refresh_params(ioc, true);
     spin_unlock_irq(&ioc->lock);
 
-    put_disk_and_module(disk);
+    blkdev_put_no_open(bdev);
     return nbytes;
 einval:
     ret = -EINVAL;
 err:
-    put_disk_and_module(disk);
+    blkdev_put_no_open(bdev);
     return ret;
 }

@@ -3287,23 +3310,23 @@ static const match_table_t i_lcoef_tokens = {
 static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
                     size_t nbytes, loff_t off)
 {
-    struct gendisk *disk;
+    struct block_device *bdev;
     struct ioc *ioc;
     u64 u[NR_I_LCOEFS];
     bool user;
     char *p;
    int ret;
 
-    disk = blkcg_conf_get_disk(&input);
-    if (IS_ERR(disk))
-        return PTR_ERR(disk);
+    bdev = blkcg_conf_open_bdev(&input);
+    if (IS_ERR(bdev))
+        return PTR_ERR(bdev);
 
-    ioc = q_to_ioc(disk->queue);
+    ioc = q_to_ioc(bdev->bd_disk->queue);
     if (!ioc) {
-        ret = blk_iocost_init(disk->queue);
+        ret = blk_iocost_init(bdev->bd_disk->queue);
         if (ret)
             goto err;
-        ioc = q_to_ioc(disk->queue);
+        ioc = q_to_ioc(bdev->bd_disk->queue);
     }
 
     spin_lock_irq(&ioc->lock);

@@ -3356,13 +3379,13 @@ static ssize_t ioc_cost_model_write(struct kernfs_open_file *of, char *input,
     ioc_refresh_params(ioc, true);
     spin_unlock_irq(&ioc->lock);
 
-    put_disk_and_module(disk);
+    blkdev_put_no_open(bdev);
     return nbytes;
 
 einval:
     ret = -EINVAL;
 err:
-    put_disk_and_module(disk);
+    blkdev_put_no_open(bdev);
     return ret;
 }
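To make the busy_level scaling in ioc_adjust_base_vrate() concrete (the table value below is illustrative, not the kernel's actual vrate_adj_pct contents): with busy_level = +4 and a hypothetical vrate_adj_pct[4] = 2, adj_pct becomes 100 - 2 = 98, so

    vrate = clamp(DIV64_U64_ROUND_UP(vrate * 98, 100), vrate_min, vrate_max);

shrinks the base vrate by about 2% this period. A negative busy_level of the same magnitude would use adj_pct = 102 and grow it symmetrically, while the out-of-bounds cases instead converge toward the limits in VRATE_CLAMP_ADJ_PCT steps.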
block/blk-lib.c:

@@ -65,7 +65,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
     /* In case the discard request is in a partition */
     if (bdev_is_partition(bdev))
-        part_offset = bdev->bd_part->start_sect;
+        part_offset = bdev->bd_start_sect;
 
     while (nr_sects) {
         sector_t granularity_aligned_lba, req_sects;
block/blk-merge.c:

@@ -279,6 +279,14 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
     return NULL;
 split:
     *segs = nsegs;
+
+    /*
+     * Bio splitting may cause subtle trouble such as hang when doing sync
+     * iopoll in direct IO routine. Given performance gain of iopoll for
+     * big IO can be trival, disable iopoll when split needed.
+     */
+    bio->bi_opf &= ~REQ_HIPRI;
+
     return bio_split(bio, sectors, GFP_NOIO, bs);
 }

@@ -338,7 +346,7 @@ void __blk_queue_split(struct bio **bio, unsigned int *nr_segs)
         split->bi_opf |= REQ_NOMERGE;
 
         bio_chain(split, *bio);
-        trace_block_split(q, split, (*bio)->bi_iter.bi_sector);
+        trace_block_split(split, (*bio)->bi_iter.bi_sector);
         submit_bio_noacct(*bio);
         *bio = split;
     }

@@ -683,8 +691,6 @@ static void blk_account_io_merge_request(struct request *req)
         part_stat_lock();
         part_stat_inc(req->part, merges[op_stat_group(req_op(req))]);
         part_stat_unlock();
-
-        hd_struct_put(req->part);
     }
 }

@@ -801,7 +807,7 @@ static struct request *attempt_merge(struct request_queue *q,
      */
     blk_account_io_merge_request(next);
 
-    trace_block_rq_merge(q, next);
+    trace_block_rq_merge(next);
 
     /*
      * ownership of bio passed from next to req, return 'next' for

@@ -924,7 +930,7 @@ static enum bio_merge_status bio_attempt_back_merge(struct request *req,
     if (!ll_back_merge_fn(req, bio, nr_segs))
         return BIO_MERGE_FAILED;
 
-    trace_block_bio_backmerge(req->q, req, bio);
+    trace_block_bio_backmerge(bio);
     rq_qos_merge(req->q, req, bio);
 
     if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)

@@ -948,7 +954,7 @@ static enum bio_merge_status bio_attempt_front_merge(struct request *req,
     if (!ll_front_merge_fn(req, bio, nr_segs))
         return BIO_MERGE_FAILED;
 
-    trace_block_bio_frontmerge(req->q, req, bio);
+    trace_block_bio_frontmerge(bio);
     rq_qos_merge(req->q, req, bio);
 
     if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff)
block/blk-mq-sched.c:

@@ -386,7 +386,7 @@ EXPORT_SYMBOL_GPL(blk_mq_sched_try_insert_merge);
 
 void blk_mq_sched_request_inserted(struct request *rq)
 {
-    trace_block_rq_insert(rq->q, rq);
+    trace_block_rq_insert(rq);
 }
 EXPORT_SYMBOL_GPL(blk_mq_sched_request_inserted);
block/blk-mq.c:

@@ -95,7 +95,7 @@ static void blk_mq_hctx_clear_pending(struct blk_mq_hw_ctx *hctx,
 }
 
 struct mq_inflight {
-    struct hd_struct *part;
+    struct block_device *part;
     unsigned int inflight[2];
 };

@@ -105,13 +105,15 @@ static bool blk_mq_check_inflight(struct blk_mq_hw_ctx *hctx,
 {
     struct mq_inflight *mi = priv;
 
-    if (rq->part == mi->part && blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
+    if ((!mi->part->bd_partno || rq->part == mi->part) &&
+        blk_mq_rq_state(rq) == MQ_RQ_IN_FLIGHT)
         mi->inflight[rq_data_dir(rq)]++;
 
     return true;
 }
 
-unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
+unsigned int blk_mq_in_flight(struct request_queue *q,
+        struct block_device *part)
 {
     struct mq_inflight mi = { .part = part };

@@ -120,8 +122,8 @@ unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part)
     return mi.inflight[0] + mi.inflight[1];
 }
 
-void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
-             unsigned int inflight[2])
+void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
+        unsigned int inflight[2])
 {
     struct mq_inflight mi = { .part = part };

@@ -729,7 +731,7 @@ void blk_mq_start_request(struct request *rq)
 {
     struct request_queue *q = rq->q;
 
-    trace_block_rq_issue(q, rq);
+    trace_block_rq_issue(rq);
 
     if (test_bit(QUEUE_FLAG_STATS, &q->queue_flags)) {
         rq->io_start_time_ns = ktime_get_ns();

@@ -756,7 +758,7 @@ static void __blk_mq_requeue_request(struct request *rq)
 
     blk_mq_put_driver_tag(rq);
 
-    trace_block_rq_requeue(q, rq);
+    trace_block_rq_requeue(rq);
     rq_qos_requeue(q, rq);
 
     if (blk_mq_request_started(rq)) {

@@ -1590,7 +1592,7 @@ static int blk_mq_hctx_next_cpu(struct blk_mq_hw_ctx *hctx)
  * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
  * @hctx: Pointer to the hardware queue to run.
  * @async: If we want to run the queue asynchronously.
- * @msecs: Microseconds of delay to wait before running the queue.
+ * @msecs: Milliseconds of delay to wait before running the queue.
  *
  * If !@async, try to run the queue now. Else, run the queue asynchronously and
  * with a delay of @msecs.

@@ -1619,7 +1621,7 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 /**
  * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
  * @hctx: Pointer to the hardware queue to run.
- * @msecs: Microseconds of delay to wait before running the queue.
+ * @msecs: Milliseconds of delay to wait before running the queue.
  *
  * Run a hardware queue asynchronously with a delay of @msecs.
  */

@@ -1683,7 +1685,7 @@ EXPORT_SYMBOL(blk_mq_run_hw_queues);
 /**
  * blk_mq_delay_run_hw_queues - Run all hardware queues asynchronously.
  * @q: Pointer to the request queue to run.
- * @msecs: Microseconds of delay to wait before running the queues.
+ * @msecs: Milliseconds of delay to wait before running the queues.
  */
 void blk_mq_delay_run_hw_queues(struct request_queue *q, unsigned long msecs)
 {

@@ -1817,7 +1819,7 @@ static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
 
     lockdep_assert_held(&ctx->lock);
 
-    trace_block_rq_insert(hctx->queue, rq);
+    trace_block_rq_insert(rq);
 
     if (at_head)
         list_add(&rq->queuelist, &ctx->rq_lists[type]);

@@ -1874,7 +1876,7 @@ void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
      */
     list_for_each_entry(rq, list, queuelist) {
         BUG_ON(rq->mq_ctx != ctx);
-        trace_block_rq_insert(hctx->queue, rq);
+        trace_block_rq_insert(rq);
     }
 
     spin_lock(&ctx->lock);

@@ -2155,6 +2157,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
     unsigned int nr_segs;
     blk_qc_t cookie;
     blk_status_t ret;
+    bool hipri;
 
     blk_queue_bounce(q, &bio);
     __blk_queue_split(&bio, &nr_segs);

@@ -2171,6 +2174,8 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
 
     rq_qos_throttle(q, bio);
 
+    hipri = bio->bi_opf & REQ_HIPRI;
+
     data.cmd_flags = bio->bi_opf;
     rq = __blk_mq_alloc_request(&data);
     if (unlikely(!rq)) {

@@ -2180,7 +2185,7 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
         goto queue_exit;
     }
 
-    trace_block_getrq(q, bio, bio->bi_opf);
+    trace_block_getrq(bio);
 
     rq_qos_track(q, rq, bio);

@@ -2263,6 +2268,8 @@ blk_qc_t blk_mq_submit_bio(struct bio *bio)
         blk_mq_sched_insert_request(rq, false, true, true);
     }
 
+    if (!hipri)
+        return BLK_QC_T_NONE;
     return cookie;
 queue_exit:
     blk_queue_exit(q);

@@ -3373,6 +3380,12 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set,
     return 0;
 }
 
+static int blk_mq_alloc_tag_set_tags(struct blk_mq_tag_set *set,
+                int new_nr_hw_queues)
+{
+    return blk_mq_realloc_tag_set_tags(set, 0, new_nr_hw_queues);
+}
+
 /*
  * Alloc a tag set to be associated with one or more request queues.
  * May fail with EINVAL for various error conditions. May adjust the

@@ -3426,7 +3439,7 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
     if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
         set->nr_hw_queues = nr_cpu_ids;
 
-    if (blk_mq_realloc_tag_set_tags(set, 0, set->nr_hw_queues) < 0)
+    if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
         return -ENOMEM;
 
     ret = -ENOMEM;

@@ -3861,9 +3874,10 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
      * the state. Like for the other success return cases, the
      * caller is responsible for checking if the IO completed. If
      * the IO isn't complete, we'll get called again and will go
-     * straight to the busy poll loop.
+     * straight to the busy poll loop. If specified not to spin,
+     * we also should not sleep.
      */
-    if (blk_mq_poll_hybrid(q, hctx, cookie))
+    if (spin && blk_mq_poll_hybrid(q, hctx, cookie))
         return 1;
 
     hctx->poll_considered++;
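Two of the hunks above interact: a split bio has REQ_HIPRI cleared in blk_bio_segment_split(), and since blk_mq_submit_bio() samples hipri after the split, it now returns BLK_QC_T_NONE so nobody polls a partially-split IO; blk_poll() additionally skips the hybrid sleep when the caller asked not to spin. A hedged sketch of the caller's side (simplified from the sync direct-IO pattern, not code from this commit):

    blk_qc_t cookie = submit_bio(bio);

    while (!READ_ONCE(done)) {
        /* BLK_QC_T_NONE: polling is off, wait for the interrupt path */
        if (cookie == BLK_QC_T_NONE ||
            blk_poll(q, cookie, true /* spin */) <= 0)
            io_schedule();
    }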
block/blk-mq.h:

@@ -99,7 +99,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue *
  * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue
  * @q: request queue
  * @flags: request command flags
- * @cpu: cpu ctx
+ * @ctx: software queue cpu ctx
  */
 static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,
                              unsigned int flags,

@@ -182,9 +182,10 @@ static inline bool blk_mq_hw_queue_mapped(struct blk_mq_hw_ctx *hctx)
     return hctx->nr_ctx && hctx->tags;
 }
 
-unsigned int blk_mq_in_flight(struct request_queue *q, struct hd_struct *part);
-void blk_mq_in_flight_rw(struct request_queue *q, struct hd_struct *part,
-             unsigned int inflight[2]);
+unsigned int blk_mq_in_flight(struct request_queue *q,
+        struct block_device *part);
+void blk_mq_in_flight_rw(struct request_queue *q, struct block_device *part,
+        unsigned int inflight[2]);
 
 static inline void blk_mq_put_dispatch_budget(struct request_queue *q)
 {
block/blk-throttle.c:

@@ -587,6 +587,7 @@ static void throtl_pd_online(struct blkg_policy_data *pd)
     tg_update_has_rules(tg);
 }
 
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
 static void blk_throtl_update_limit_valid(struct throtl_data *td)
 {
     struct cgroup_subsys_state *pos_css;

@@ -607,6 +608,11 @@ static void blk_throtl_update_limit_valid(struct throtl_data *td)
 
     td->limit_valid[LIMIT_LOW] = low_valid;
 }
+#else
+static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
+{
+}
+#endif
 
 static void throtl_upgrade_state(struct throtl_data *td);
 static void throtl_pd_offline(struct blkg_policy_data *pd)
block/blk-wbt.c:

@@ -835,7 +835,6 @@ int wbt_init(struct request_queue *q)
     rwb->enable_state = WBT_STATE_ON_DEFAULT;
     rwb->wc = 1;
     rwb->rq_depth.default_depth = RWB_DEF_DEPTH;
-    wbt_update_limits(rwb);
 
     /*
      * Assign rwb and add the stats callback.
block/blk-zoned.c:

@@ -508,15 +508,29 @@ int blk_revalidate_disk_zones(struct gendisk *disk,
     noio_flag = memalloc_noio_save();
     ret = disk->fops->report_zones(disk, 0, UINT_MAX,
                        blk_revalidate_zone_cb, &args);
+    if (!ret) {
+        pr_warn("%s: No zones reported\n", disk->disk_name);
+        ret = -ENODEV;
+    }
     memalloc_noio_restore(noio_flag);
 
+    /*
+     * If zones where reported, make sure that the entire disk capacity
+     * has been checked.
+     */
+    if (ret > 0 && args.sector != get_capacity(disk)) {
+        pr_warn("%s: Missing zones from sector %llu\n",
+            disk->disk_name, args.sector);
+        ret = -ENODEV;
+    }
+
     /*
      * Install the new bitmaps and update nr_zones only once the queue is
      * stopped and all I/Os are completed (i.e. a scheduler is not
      * referencing the bitmaps).
      */
     blk_mq_freeze_queue(q);
-    if (ret >= 0) {
+    if (ret > 0) {
         blk_queue_chunk_sectors(q, args.zone_sectors);
         q->nr_zones = args.nr_zones;
         swap(q->seq_zones_wlock, args.seq_zones_wlock);
block/blk.h (85 changed lines):

@@ -25,7 +25,6 @@ struct blk_flush_queue {
     struct list_head flush_data_in_flight;
     struct request *flush_rq;
 
-    struct lock_class_key key;
     spinlock_t mq_flush_lock;
 };

@@ -215,7 +214,7 @@ static inline void elevator_exit(struct request_queue *q,
     __elevator_exit(q, e);
 }
 
-struct hd_struct *__disk_get_part(struct gendisk *disk, int partno);
+struct block_device *__disk_get_part(struct gendisk *disk, int partno);
 
 ssize_t part_size_show(struct device *dev, struct device_attribute *attr,
         char *buf);

@@ -348,97 +347,21 @@ void blk_queue_free_zone_bitmaps(struct request_queue *q);
 static inline void blk_queue_free_zone_bitmaps(struct request_queue *q) {}
 #endif
 
-struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
+struct block_device *disk_map_sector_rcu(struct gendisk *disk, sector_t sector);
 
-int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
+int blk_alloc_devt(struct block_device *part, dev_t *devt);
 void blk_free_devt(dev_t devt);
 void blk_invalidate_devt(dev_t devt);
 char *disk_name(struct gendisk *hd, int partno, char *buf);
 #define ADDPART_FLAG_NONE 0
 #define ADDPART_FLAG_RAID 1
 #define ADDPART_FLAG_WHOLEDISK 2
-void delete_partition(struct hd_struct *part);
+void delete_partition(struct block_device *part);
 int bdev_add_partition(struct block_device *bdev, int partno,
         sector_t start, sector_t length);
 int bdev_del_partition(struct block_device *bdev, int partno);
 int bdev_resize_partition(struct block_device *bdev, int partno,
         sector_t start, sector_t length);
 int disk_expand_part_tbl(struct gendisk *disk, int target);
-int hd_ref_init(struct hd_struct *part);
-
-/* no need to get/put refcount of part0 */
-static inline int hd_struct_try_get(struct hd_struct *part)
-{
-    if (part->partno)
-        return percpu_ref_tryget_live(&part->ref);
-    return 1;
-}
-
-static inline void hd_struct_put(struct hd_struct *part)
-{
-    if (part->partno)
-        percpu_ref_put(&part->ref);
-}
-
-static inline void hd_free_part(struct hd_struct *part)
-{
-    free_percpu(part->dkstats);
-    kfree(part->info);
-    percpu_ref_exit(&part->ref);
-}
-
-/*
- * Any access of part->nr_sects which is not protected by partition
- * bd_mutex or gendisk bdev bd_mutex, should be done using this
- * accessor function.
- *
- * Code written along the lines of i_size_read() and i_size_write().
- * CONFIG_PREEMPTION case optimizes the case of UP kernel with preemption
- * on.
- */
-static inline sector_t part_nr_sects_read(struct hd_struct *part)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-    sector_t nr_sects;
-    unsigned seq;
-    do {
-        seq = read_seqcount_begin(&part->nr_sects_seq);
-        nr_sects = part->nr_sects;
-    } while (read_seqcount_retry(&part->nr_sects_seq, seq));
-    return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
-    sector_t nr_sects;
-
-    preempt_disable();
-    nr_sects = part->nr_sects;
-    preempt_enable();
-    return nr_sects;
-#else
-    return part->nr_sects;
-#endif
-}
-
-/*
- * Should be called with mutex lock held (typically bd_mutex) of partition
- * to provide mutual exlusion among writers otherwise seqcount might be
- * left in wrong state leaving the readers spinning infinitely.
- */
-static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-    preempt_disable();
-    write_seqcount_begin(&part->nr_sects_seq);
-    part->nr_sects = size;
-    write_seqcount_end(&part->nr_sects_seq);
-    preempt_enable();
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
-    preempt_disable();
-    part->nr_sects = size;
-    preempt_enable();
-#else
-    part->nr_sects = size;
-#endif
-}
-
 int bio_add_hw_page(struct request_queue *q, struct bio *bio,
         struct page *page, unsigned int len, unsigned int offset,
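The deleted part_nr_sects_read()/part_nr_sects_write() pair existed only because hd_struct kept its own nr_sects, which needed i_size_read()-style torn-read protection on 32-bit kernels. With a partition now being a struct block_device, the size lives in the backing inode and the existing inode helpers give the same guarantee. A sketch of the replacement path, paraphrased from the 5.11-era headers (exact definition may differ slightly):

    static inline sector_t bdev_nr_sectors(struct block_device *bdev)
    {
        /* i_size_read() already handles the 32-bit torn-read case */
        return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
    }

Writers update the size under bd_size_lock, as bdev_set_nr_sectors() in block/partitions/core.c below does.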
block/bounce.c:

@@ -340,7 +340,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
         }
     }
 
-    trace_block_bio_bounce(q, *bio_orig);
+    trace_block_bio_bounce(*bio_orig);
 
     bio->bi_flags |= (1 << BIO_BOUNCED);
block/genhd.c (565 changed lines; file diff suppressed because it is too large)
@ -35,15 +35,6 @@ static int blkpg_do_ioctl(struct block_device *bdev,
|
|||
start = p.start >> SECTOR_SHIFT;
|
||||
length = p.length >> SECTOR_SHIFT;
|
||||
|
||||
/* check for fit in a hd_struct */
|
||||
if (sizeof(sector_t) < sizeof(long long)) {
|
||||
long pstart = start, plength = length;
|
||||
|
||||
if (pstart != start || plength != length || pstart < 0 ||
|
||||
plength < 0 || p.pno > 65535)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (op) {
|
||||
case BLKPG_ADD_PARTITION:
|
||||
/* check if partition is aligned to blocksize */
|
||||
|
@ -219,23 +210,6 @@ static int compat_put_ulong(compat_ulong_t __user *argp, compat_ulong_t val)
|
|||
}
|
||||
#endif
|
||||
|
||||
int __blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
|
||||
unsigned cmd, unsigned long arg)
|
||||
{
|
||||
struct gendisk *disk = bdev->bd_disk;
|
||||
|
||||
if (disk->fops->ioctl)
|
||||
return disk->fops->ioctl(bdev, mode, cmd, arg);
|
||||
|
||||
return -ENOTTY;
|
||||
}
|
||||
/*
|
||||
* For the record: _GPL here is only because somebody decided to slap it
|
||||
* on the previous export. Sheer idiocy, since it wasn't copyrightable
|
||||
* at all and could be open-coded without any exports by anybody who cares.
|
||||
*/
|
||||
EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl);
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
/*
|
||||
* This is the equivalent of compat_ptr_ioctl(), to be used by block
|
||||
|
@@ -346,38 +320,11 @@ static int blkdev_pr_clear(struct block_device *bdev,
	return ops->pr_clear(bdev, c.key);
}

/*
 * Is it an unrecognized ioctl? The correct returns are either
 * ENOTTY (final) or ENOIOCTLCMD ("I don't know this one, try a
 * fallback"). ENOIOCTLCMD gets turned into ENOTTY by the ioctl
 * code before returning.
 *
 * Confused drivers sometimes return EINVAL, which is wrong. It
 * means "I understood the ioctl command, but the parameters to
 * it were wrong".
 *
 * We should aim to just fix the broken drivers, the EINVAL case
 * should go away.
 */
static inline int is_unrecognized_ioctl(int ret)
{
	return ret == -EINVAL ||
		ret == -ENOTTY ||
		ret == -ENOIOCTLCMD;
}

static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
		unsigned cmd, unsigned long arg)
{
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
	if (!is_unrecognized_ioctl(ret))
		return ret;

	fsync_bdev(bdev);
	invalidate_bdev(bdev);
	return 0;
@@ -391,12 +338,14 @@ static int blkdev_roset(struct block_device *bdev, fmode_t mode,
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
	if (!is_unrecognized_ioctl(ret))
		return ret;
	if (get_user(n, (int __user *)arg))
		return -EFAULT;
	set_device_ro(bdev, n);
	if (bdev->bd_disk->fops->set_read_only) {
		ret = bdev->bd_disk->fops->set_read_only(bdev, n);
		if (ret)
			return ret;
	}
	bdev->bd_read_only = n;
	return 0;
}

@@ -619,10 +568,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
	}

	ret = blkdev_common_ioctl(bdev, mode, cmd, arg, argp);
	if (ret == -ENOIOCTLCMD)
		return __blkdev_driver_ioctl(bdev, mode, cmd, arg);
	if (ret != -ENOIOCTLCMD)
		return ret;

	return ret;
	if (!bdev->bd_disk->fops->ioctl)
		return -ENOTTY;
	return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
}
EXPORT_SYMBOL_GPL(blkdev_ioctl); /* for /dev/raw */
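With __blkdev_driver_ioctl() gone as an exported fallback, the new blkdev_ioctl() dispatch order is: generic block-layer ioctls first, then the driver's own method. Condensed from the new lines above (early-exit paths trimmed); the helper name here is invented for illustration, the real function keeps the blkdev_ioctl name:

static long blkdev_ioctl_dispatch(struct block_device *bdev, fmode_t mode,
				  unsigned int cmd, unsigned long arg)
{
	int ret = blkdev_common_ioctl(bdev, mode, cmd, arg,
				      (void __user *)arg);

	if (ret != -ENOIOCTLCMD)
		return ret;
	if (!bdev->bd_disk->fops->ioctl)
		return -ENOTTY;
	return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
}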
@@ -639,8 +590,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
{
	int ret;
	void __user *argp = compat_ptr(arg);
	struct inode *inode = file->f_mapping->host;
	struct block_device *bdev = inode->i_bdev;
	struct block_device *bdev = I_BDEV(file->f_mapping->host);
	struct gendisk *disk = bdev->bd_disk;
	fmode_t mode = file->f_mode;
	loff_t size;

@@ -85,6 +85,13 @@ static int (*check_part[])(struct parsed_partitions *) = {
	NULL
};

static void bdev_set_nr_sectors(struct block_device *bdev, sector_t sectors)
{
	spin_lock(&bdev->bd_size_lock);
	i_size_write(bdev->bd_inode, (loff_t)sectors << SECTOR_SHIFT);
	spin_unlock(&bdev->bd_size_lock);
}
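bdev_set_nr_sectors() now stores a partition's size as the backing inode's i_size, with bd_size_lock serializing writers. Readers stay lockless and pair with it via i_size_read(); a sketch of that read side, which mirrors the bdev_nr_sectors() helper used elsewhere in this series:

static inline sector_t bdev_nr_sectors_sketch(struct block_device *bdev)
{
	return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
}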
static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
{
	struct parsed_partitions *state;
@@ -175,44 +182,39 @@ static struct parsed_partitions *check_partition(struct gendisk *hd,
static ssize_t part_partition_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hd_struct *p = dev_to_part(dev);

	return sprintf(buf, "%d\n", p->partno);
	return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_partno);
}

static ssize_t part_start_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hd_struct *p = dev_to_part(dev);

	return sprintf(buf, "%llu\n",(unsigned long long)p->start_sect);
	return sprintf(buf, "%llu\n", dev_to_bdev(dev)->bd_start_sect);
}

static ssize_t part_ro_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	return sprintf(buf, "%d\n", p->policy ? 1 : 0);
	return sprintf(buf, "%d\n", dev_to_bdev(dev)->bd_read_only);
}

static ssize_t part_alignment_offset_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct block_device *bdev = dev_to_bdev(dev);

	return sprintf(buf, "%u\n",
		queue_limit_alignment_offset(&part_to_disk(p)->queue->limits,
				p->start_sect));
		queue_limit_alignment_offset(&bdev->bd_disk->queue->limits,
				bdev->bd_start_sect));
}

static ssize_t part_discard_alignment_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct hd_struct *p = dev_to_part(dev);
	struct block_device *bdev = dev_to_bdev(dev);

	return sprintf(buf, "%u\n",
		queue_limit_discard_alignment(&part_to_disk(p)->queue->limits,
				p->start_sect));
		queue_limit_discard_alignment(&bdev->bd_disk->queue->limits,
				bdev->bd_start_sect));
}

static DEVICE_ATTR(partition, 0444, part_partition_show, NULL);
@@ -257,19 +259,17 @@ static const struct attribute_group *part_attr_groups[] = {

static void part_release(struct device *dev)
{
	struct hd_struct *p = dev_to_part(dev);
	blk_free_devt(dev->devt);
	hd_free_part(p);
	kfree(p);
	bdput(dev_to_bdev(dev));
}

static int part_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct hd_struct *part = dev_to_part(dev);
	struct block_device *part = dev_to_bdev(dev);

	add_uevent_var(env, "PARTN=%u", part->partno);
	if (part->info && part->info->volname[0])
		add_uevent_var(env, "PARTNAME=%s", part->info->volname);
	add_uevent_var(env, "PARTN=%u", part->bd_partno);
	if (part->bd_meta_info && part->bd_meta_info->volname[0])
		add_uevent_var(env, "PARTNAME=%s", part->bd_meta_info->volname);
	return 0;
}
@@ -280,73 +280,29 @@ struct device_type part_type = {
	.uevent = part_uevent,
};

static void hd_struct_free_work(struct work_struct *work)
{
	struct hd_struct *part =
		container_of(to_rcu_work(work), struct hd_struct, rcu_work);
	struct gendisk *disk = part_to_disk(part);

	/*
	 * Release the disk reference acquired in delete_partition here.
	 * We can't release it in hd_struct_free because the final put_device
	 * needs process context and thus can't be run directly from a
	 * percpu_ref ->release handler.
	 */
	put_device(disk_to_dev(disk));

	part->start_sect = 0;
	part->nr_sects = 0;
	part_stat_set_all(part, 0);
	put_device(part_to_dev(part));
}

static void hd_struct_free(struct percpu_ref *ref)
{
	struct hd_struct *part = container_of(ref, struct hd_struct, ref);
	struct gendisk *disk = part_to_disk(part);
	struct disk_part_tbl *ptbl =
		rcu_dereference_protected(disk->part_tbl, 1);

	rcu_assign_pointer(ptbl->last_lookup, NULL);

	INIT_RCU_WORK(&part->rcu_work, hd_struct_free_work);
	queue_rcu_work(system_wq, &part->rcu_work);
}

int hd_ref_init(struct hd_struct *part)
{
	if (percpu_ref_init(&part->ref, hd_struct_free, 0, GFP_KERNEL))
		return -ENOMEM;
	return 0;
}

/*
 * Must be called either with bd_mutex held, before a disk can be opened or
 * after all disk users are gone.
 */
void delete_partition(struct hd_struct *part)
void delete_partition(struct block_device *part)
{
	struct gendisk *disk = part_to_disk(part);
	struct gendisk *disk = part->bd_disk;
	struct disk_part_tbl *ptbl =
		rcu_dereference_protected(disk->part_tbl, 1);

	/*
	 * ->part_tbl is referenced in this part's release handler, so
	 * we have to hold the disk device
	 */
	get_device(disk_to_dev(disk));
	rcu_assign_pointer(ptbl->part[part->partno], NULL);
	kobject_put(part->holder_dir);
	device_del(part_to_dev(part));
	rcu_assign_pointer(ptbl->part[part->bd_partno], NULL);
	rcu_assign_pointer(ptbl->last_lookup, NULL);

	kobject_put(part->bd_holder_dir);
	device_del(&part->bd_device);

	/*
	 * Remove gendisk pointer from idr so that it cannot be looked up
	 * while RCU period before freeing gendisk is running to prevent
	 * use-after-free issues. Note that the device number stays
	 * "in-use" until we really free the gendisk.
	 * Remove the block device from the inode hash, so that it cannot be
	 * looked up any more even when openers still hold references.
	 */
	blk_invalidate_devt(part_devt(part));
	percpu_ref_kill(&part->ref);
	remove_inode_hash(part->bd_inode);

	put_device(&part->bd_device);
}
static ssize_t whole_disk_show(struct device *dev,
@@ -360,14 +316,14 @@ static DEVICE_ATTR(whole_disk, 0444, whole_disk_show, NULL);
 * Must be called either with bd_mutex held, before a disk can be opened or
 * after all disk users are gone.
 */
static struct hd_struct *add_partition(struct gendisk *disk, int partno,
static struct block_device *add_partition(struct gendisk *disk, int partno,
		sector_t start, sector_t len, int flags,
		struct partition_meta_info *info)
{
	struct hd_struct *p;
	dev_t devt = MKDEV(0, 0);
	struct device *ddev = disk_to_dev(disk);
	struct device *pdev;
	struct block_device *bdev;
	struct disk_part_tbl *ptbl;
	const char *dname;
	int err;

@@ -398,36 +354,22 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno,
	if (ptbl->part[partno])
		return ERR_PTR(-EBUSY);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-EBUSY);
	bdev = bdev_alloc(disk, partno);
	if (!bdev)
		return ERR_PTR(-ENOMEM);

	p->dkstats = alloc_percpu(struct disk_stats);
	if (!p->dkstats) {
		err = -ENOMEM;
		goto out_free;
	}

	hd_sects_seq_init(p);
	pdev = part_to_dev(p);

	p->start_sect = start;
	p->nr_sects = len;
	p->partno = partno;
	p->policy = get_disk_ro(disk);
	bdev->bd_start_sect = start;
	bdev_set_nr_sectors(bdev, len);
	bdev->bd_read_only = get_disk_ro(disk);

	if (info) {
		struct partition_meta_info *pinfo;

		pinfo = kzalloc_node(sizeof(*pinfo), GFP_KERNEL, disk->node_id);
		if (!pinfo) {
			err = -ENOMEM;
			goto out_free_stats;
		}
		memcpy(pinfo, info, sizeof(*info));
		p->info = pinfo;
		err = -ENOMEM;
		bdev->bd_meta_info = kmemdup(info, sizeof(*info), GFP_KERNEL);
		if (!bdev->bd_meta_info)
			goto out_bdput;
	}

	pdev = &bdev->bd_device;
	dname = dev_name(ddev);
	if (isdigit(dname[strlen(dname) - 1]))
		dev_set_name(pdev, "%sp%d", dname, partno);

@@ -439,9 +381,9 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno,
	pdev->type = &part_type;
	pdev->parent = ddev;

	err = blk_alloc_devt(p, &devt);
	err = blk_alloc_devt(bdev, &devt);
	if (err)
		goto out_free_info;
		goto out_bdput;
	pdev->devt = devt;

	/* delay uevent until 'holders' subdir is created */

@@ -451,8 +393,8 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno,
		goto out_put;

	err = -ENOMEM;
	p->holder_dir = kobject_create_and_add("holders", &pdev->kobj);
	if (!p->holder_dir)
	bdev->bd_holder_dir = kobject_create_and_add("holders", &pdev->kobj);
	if (!bdev->bd_holder_dir)
		goto out_del;

	dev_set_uevent_suppress(pdev, 0);

@@ -462,32 +404,20 @@ static struct hd_struct *add_partition(struct gendisk *disk, int partno,
		goto out_del;
	}

	err = hd_ref_init(p);
	if (err) {
		if (flags & ADDPART_FLAG_WHOLEDISK)
			goto out_remove_file;
		goto out_del;
	}

	/* everything is up and running, commence */
	rcu_assign_pointer(ptbl->part[partno], p);
	bdev_add(bdev, devt);
	rcu_assign_pointer(ptbl->part[partno], bdev);

	/* suppress uevent if the disk suppresses it */
	if (!dev_get_uevent_suppress(ddev))
		kobject_uevent(&pdev->kobj, KOBJ_ADD);
	return p;
	return bdev;

out_free_info:
	kfree(p->info);
out_free_stats:
	free_percpu(p->dkstats);
out_free:
	kfree(p);
out_bdput:
	bdput(bdev);
	return ERR_PTR(err);
out_remove_file:
	device_remove_file(pdev, &dev_attr_whole_disk);
out_del:
	kobject_put(p->holder_dir);
	kobject_put(bdev->bd_holder_dir);
	device_del(pdev);
out_put:
	put_device(pdev);
@@ -498,14 +428,14 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
		sector_t length, int skip_partno)
{
	struct disk_part_iter piter;
	struct hd_struct *part;
	struct block_device *part;
	bool overlap = false;

	disk_part_iter_init(&piter, disk, DISK_PITER_INCL_EMPTY);
	while ((part = disk_part_iter_next(&piter))) {
		if (part->partno == skip_partno ||
		    start >= part->start_sect + part->nr_sects ||
		    start + length <= part->start_sect)
		if (part->bd_partno == skip_partno ||
		    start >= part->bd_start_sect + bdev_nr_sectors(part) ||
		    start + length <= part->bd_start_sect)
			continue;
		overlap = true;
		break;
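The continue condition above is the negation of the standard half-open interval overlap test. In standalone form (names hypothetical):

#include <stdbool.h>
#include <stdint.h>

static bool ranges_overlap(uint64_t a_start, uint64_t a_len,
			   uint64_t b_start, uint64_t b_len)
{
	/* [a_start, a_start + a_len) intersects [b_start, b_start + b_len) */
	return a_start < b_start + b_len && b_start < a_start + a_len;
}

partition_overlaps() skips a partition exactly when this predicate is false, or when the partition is the one being resized (skip_partno).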
@@ -518,7 +448,7 @@ static bool partition_overlaps(struct gendisk *disk, sector_t start,
int bdev_add_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length)
{
	struct hd_struct *part;
	struct block_device *part;

	mutex_lock(&bdev->bd_mutex);
	if (partition_overlaps(bdev->bd_disk, start, length, -1)) {
@@ -534,77 +464,59 @@ int bdev_add_partition(struct block_device *bdev, int partno,

int bdev_del_partition(struct block_device *bdev, int partno)
{
	struct block_device *bdevp;
	struct hd_struct *part = NULL;
	struct block_device *part;
	int ret;

	bdevp = bdget_disk(bdev->bd_disk, partno);
	if (!bdevp)
	part = bdget_disk(bdev->bd_disk, partno);
	if (!part)
		return -ENXIO;

	mutex_lock(&bdevp->bd_mutex);
	mutex_lock(&part->bd_mutex);
	mutex_lock_nested(&bdev->bd_mutex, 1);

	ret = -ENXIO;
	part = disk_get_part(bdev->bd_disk, partno);
	if (!part)
		goto out_unlock;

	ret = -EBUSY;
	if (bdevp->bd_openers)
	if (part->bd_openers)
		goto out_unlock;

	sync_blockdev(bdevp);
	invalidate_bdev(bdevp);
	sync_blockdev(part);
	invalidate_bdev(part);

	delete_partition(part);
	ret = 0;
out_unlock:
	mutex_unlock(&bdev->bd_mutex);
	mutex_unlock(&bdevp->bd_mutex);
	bdput(bdevp);
	if (part)
		disk_put_part(part);
	mutex_unlock(&part->bd_mutex);
	bdput(part);
	return ret;
}

int bdev_resize_partition(struct block_device *bdev, int partno,
		sector_t start, sector_t length)
{
	struct block_device *bdevp;
	struct hd_struct *part;
	struct block_device *part;
	int ret = 0;

	part = disk_get_part(bdev->bd_disk, partno);
	part = bdget_disk(bdev->bd_disk, partno);
	if (!part)
		return -ENXIO;

	ret = -ENOMEM;
	bdevp = bdget_part(part);
	if (!bdevp)
		goto out_put_part;

	mutex_lock(&bdevp->bd_mutex);
	mutex_lock(&part->bd_mutex);
	mutex_lock_nested(&bdev->bd_mutex, 1);

	ret = -EINVAL;
	if (start != part->start_sect)
	if (start != part->bd_start_sect)
		goto out_unlock;

	ret = -EBUSY;
	if (partition_overlaps(bdev->bd_disk, start, length, partno))
		goto out_unlock;

	part_nr_sects_write(part, length);
	bd_set_nr_sectors(bdevp, length);
	bdev_set_nr_sectors(part, length);

	ret = 0;
out_unlock:
	mutex_unlock(&bdevp->bd_mutex);
	mutex_unlock(&part->bd_mutex);
	mutex_unlock(&bdev->bd_mutex);
	bdput(bdevp);
out_put_part:
	disk_put_part(part);
	bdput(part);
	return ret;
}
@@ -627,7 +539,7 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
int blk_drop_partitions(struct block_device *bdev)
{
	struct disk_part_iter piter;
	struct hd_struct *part;
	struct block_device *part;

	if (bdev->bd_part_count)
		return -EBUSY;

@@ -652,7 +564,7 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,
{
	sector_t size = state->parts[p].size;
	sector_t from = state->parts[p].from;
	struct hd_struct *part;
	struct block_device *part;

	if (!size)
		return true;

@@ -692,7 +604,7 @@ static bool blk_add_partition(struct gendisk *disk, struct block_device *bdev,

	if (IS_BUILTIN(CONFIG_BLK_DEV_MD) &&
	    (state->parts[p].flags & ADDPART_FLAG_RAID))
		md_autodetect_dev(part_to_dev(part)->devt);
		md_autodetect_dev(part->bd_dev);

	return true;
}
@@ -201,7 +201,7 @@ struct amiga_floppy_struct {
	int busy;			/* true when drive is active */
	int dirty;			/* true when trackbuf is not on disk */
	int status;			/* current error code for unit */
	struct gendisk *gendisk;
	struct gendisk *gendisk[2];
	struct blk_mq_tag_set tag_set;
};

@@ -1669,6 +1669,11 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
		return -EBUSY;
	}

	if (unit[drive].type->code == FD_NODRIVE) {
		mutex_unlock(&amiflop_mutex);
		return -ENXIO;
	}

	if (mode & (FMODE_READ|FMODE_WRITE)) {
		bdev_check_media_change(bdev);
		if (mode & FMODE_WRITE) {

@@ -1695,7 +1700,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
	unit[drive].dtype=&data_types[system];
	unit[drive].blocks=unit[drive].type->heads*unit[drive].type->tracks*
		data_types[system].sects*unit[drive].type->sect_mult;
	set_capacity(unit[drive].gendisk, unit[drive].blocks);
	set_capacity(unit[drive].gendisk[system], unit[drive].blocks);

	printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive,
	       unit[drive].type->name, data_types[system].name);

@@ -1772,36 +1777,68 @@ static const struct blk_mq_ops amiflop_mq_ops = {
	.queue_rq = amiflop_queue_rq,
};

static struct gendisk *fd_alloc_disk(int drive)
static int fd_alloc_disk(int drive, int system)
{
	struct gendisk *disk;

	disk = alloc_disk(1);
	if (!disk)
		goto out;

	disk->queue = blk_mq_init_sq_queue(&unit[drive].tag_set, &amiflop_mq_ops,
					   2, BLK_MQ_F_SHOULD_MERGE);
	if (IS_ERR(disk->queue)) {
		disk->queue = NULL;
	disk->queue = blk_mq_init_queue(&unit[drive].tag_set);
	if (IS_ERR(disk->queue))
		goto out_put_disk;
	}

	unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
	if (!unit[drive].trackbuf)
		goto out_cleanup_queue;
	disk->major = FLOPPY_MAJOR;
	disk->first_minor = drive + system;
	disk->fops = &floppy_fops;
	disk->events = DISK_EVENT_MEDIA_CHANGE;
	if (system)
		sprintf(disk->disk_name, "fd%d_msdos", drive);
	else
		sprintf(disk->disk_name, "fd%d", drive);
	disk->private_data = &unit[drive];
	set_capacity(disk, 880 * 2);

	return disk;
	unit[drive].gendisk[system] = disk;
	add_disk(disk);
	return 0;

out_cleanup_queue:
	blk_cleanup_queue(disk->queue);
	disk->queue = NULL;
	blk_mq_free_tag_set(&unit[drive].tag_set);
out_put_disk:
	disk->queue = NULL;
	put_disk(disk);
out:
	return -ENOMEM;
}

static int fd_alloc_drive(int drive)
{
	unit[drive].trackbuf = kmalloc(FLOPPY_MAX_SECTORS * 512, GFP_KERNEL);
	if (!unit[drive].trackbuf)
		goto out;

	memset(&unit[drive].tag_set, 0, sizeof(unit[drive].tag_set));
	unit[drive].tag_set.ops = &amiflop_mq_ops;
	unit[drive].tag_set.nr_hw_queues = 1;
	unit[drive].tag_set.nr_maps = 1;
	unit[drive].tag_set.queue_depth = 2;
	unit[drive].tag_set.numa_node = NUMA_NO_NODE;
	unit[drive].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
	if (blk_mq_alloc_tag_set(&unit[drive].tag_set))
		goto out_cleanup_trackbuf;

	pr_cont(" fd%d", drive);

	if (fd_alloc_disk(drive, 0) || fd_alloc_disk(drive, 1))
		goto out_cleanup_tagset;
	return 0;

out_cleanup_tagset:
	blk_mq_free_tag_set(&unit[drive].tag_set);
out_cleanup_trackbuf:
	kfree(unit[drive].trackbuf);
out:
	unit[drive].type->code = FD_NODRIVE;
	return NULL;
	return -ENOMEM;
}
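fd_alloc_drive() shows the recurring conversion in this series: the blk_mq_init_sq_queue() convenience helper is replaced by an explicitly initialized tag set, so one tag set can back both gendisks of a drive. The pattern, extracted from the code above with error handling trimmed:

static int example_init_tag_set(struct blk_mq_tag_set *set,
				const struct blk_mq_ops *ops)
{
	memset(set, 0, sizeof(*set));
	set->ops = ops;
	set->nr_hw_queues = 1;
	set->nr_maps = 1;
	set->queue_depth = 2;
	set->numa_node = NUMA_NO_NODE;
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	return blk_mq_alloc_tag_set(set);
}

Each disk then gets its queue from blk_mq_init_queue(set), as fd_alloc_disk() does above.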
static int __init fd_probe_drives(void)
@@ -1812,29 +1849,16 @@ static int __init fd_probe_drives(void)
	drives=0;
	nomem=0;
	for(drive=0;drive<FD_MAX_UNITS;drive++) {
		struct gendisk *disk;
		fd_probe(drive);
		if (unit[drive].type->code == FD_NODRIVE)
			continue;

		disk = fd_alloc_disk(drive);
		if (!disk) {
		if (fd_alloc_drive(drive) < 0) {
			pr_cont(" no mem for fd%d", drive);
			nomem = 1;
			continue;
		}
		unit[drive].gendisk = disk;
		drives++;

		pr_cont(" fd%d",drive);
		disk->major = FLOPPY_MAJOR;
		disk->first_minor = drive;
		disk->fops = &floppy_fops;
		disk->events = DISK_EVENT_MEDIA_CHANGE;
		sprintf(disk->disk_name, "fd%d", drive);
		disk->private_data = &unit[drive];
		set_capacity(disk, 880*2);
		add_disk(disk);
	}
	if ((drives > 0) || (nomem == 0)) {
		if (drives == 0)

@@ -1846,15 +1870,6 @@ static int __init fd_probe_drives(void)
	return -ENOMEM;
}

static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
	int drive = *part & 3;
	if (unit[drive].type->code == FD_NODRIVE)
		return NULL;
	*part = 0;
	return get_disk_and_module(unit[drive].gendisk);
}

static int __init amiga_floppy_probe(struct platform_device *pdev)
{
	int i, ret;

@@ -1884,9 +1899,6 @@ static int __init amiga_floppy_probe(struct platform_device *pdev)
	if (fd_probe_drives() < 1) /* No usable drives */
		goto out_probe;

	blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
			    floppy_find, NULL, NULL);

	/* initialize variables */
	timer_setup(&motor_on_timer, motor_on_callback, 0);
	motor_on_timer.expires = 0;
@@ -890,19 +890,13 @@ void
aoecmd_sleepwork(struct work_struct *work)
{
	struct aoedev *d = container_of(work, struct aoedev, work);
	struct block_device *bd;
	u64 ssize;

	if (d->flags & DEVFL_GDALLOC)
		aoeblk_gdalloc(d);

	if (d->flags & DEVFL_NEWSIZE) {
		ssize = get_capacity(d->gd);
		bd = bdget_disk(d->gd, 0);
		if (bd) {
			bd_set_nr_sectors(bd, ssize);
			bdput(bd);
		}
		set_capacity_and_notify(d->gd, d->ssize);

		spin_lock_irq(&d->lock);
		d->flags |= DEVFL_UP;
		d->flags &= ~DEVFL_NEWSIZE;

@@ -971,10 +965,9 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id)
	d->geo.start = 0;
	if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE))
		return;
	if (d->gd != NULL) {
		set_capacity(d->gd, ssize);
	if (d->gd != NULL)
		d->flags |= DEVFL_NEWSIZE;
	} else
	else
		d->flags |= DEVFL_GDALLOC;
	schedule_work(&d->work);
}
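The aoe hunks replace the bdget_disk()/bd_set_nr_sectors()/bdput() dance with a single set_capacity_and_notify() call. A sketch of its assumed semantics, based on how callers in this series use its return value (the real implementation lives in block/genhd.c and may differ in detail):

static bool set_capacity_and_notify_sketch(struct gendisk *disk,
					   sector_t size)
{
	sector_t old = get_capacity(disk);

	set_capacity(disk, size);	/* now also resizes disk->part0 */
	if (size == old)
		return false;
	kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
	return true;
}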
@@ -297,7 +297,7 @@ static struct atari_floppy_struct {
	unsigned int wpstat;	/* current state of WP signal (for
				   disk change detection) */
	int flags;		/* flags */
	struct gendisk *disk;
	struct gendisk *disk[NUM_DISK_MINORS];
	int ref;
	int type;
	struct blk_mq_tag_set tag_set;

@@ -723,12 +723,16 @@ static void fd_error( void )

static int do_format(int drive, int type, struct atari_format_descr *desc)
{
	struct request_queue *q = unit[drive].disk->queue;
	struct request_queue *q;
	unsigned char *p;
	int sect, nsect;
	unsigned long flags;
	int ret;

	if (type)
		type--;

	q = unit[drive].disk[type]->queue;
	blk_mq_freeze_queue(q);
	blk_mq_quiesce_queue(q);

@@ -738,7 +742,7 @@ static int do_format(int drive, int type, struct atari_format_descr *desc)
	local_irq_restore(flags);

	if (type) {
		if (--type >= NUM_DISK_MINORS ||
		if (type >= NUM_DISK_MINORS ||
		    minor2disktype[type].drive_types > DriveType) {
			ret = -EINVAL;
			goto out;
@@ -1154,7 +1158,7 @@ static void fd_rwsec_done1(int status)
			if (SUDT[-1].blocks > ReqBlock) {
				/* try another disk type */
				SUDT--;
				set_capacity(unit[SelectedDrive].disk,
				set_capacity(unit[SelectedDrive].disk[0],
					     SUDT->blocks);
			} else
				Probing = 0;

@@ -1169,7 +1173,7 @@ static void fd_rwsec_done1(int status)
	/* record not found, but not probing. Maybe stretch wrong ? Restart probing */
		if (SUD.autoprobe) {
			SUDT = atari_disk_type + StartDiskType[DriveType];
			set_capacity(unit[SelectedDrive].disk,
			set_capacity(unit[SelectedDrive].disk[0],
				     SUDT->blocks);
			Probing = 1;
		}

@@ -1515,7 +1519,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
		if (!UDT) {
			Probing = 1;
			UDT = atari_disk_type + StartDiskType[DriveType];
			set_capacity(floppy->disk, UDT->blocks);
			set_capacity(bd->rq->rq_disk, UDT->blocks);
			UD.autoprobe = 1;
		}
	}

@@ -1533,7 +1537,7 @@ static blk_status_t ataflop_queue_rq(struct blk_mq_hw_ctx *hctx,
		}
		type = minor2disktype[type].index;
		UDT = &atari_disk_type[type];
		set_capacity(floppy->disk, UDT->blocks);
		set_capacity(bd->rq->rq_disk, UDT->blocks);
		UD.autoprobe = 0;
	}

@@ -1658,7 +1662,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
		printk (KERN_INFO "floppy%d: setting %s %p!\n",
			drive, dtp->name, dtp);
		UDT = dtp;
		set_capacity(floppy->disk, UDT->blocks);
		set_capacity(disk, UDT->blocks);

		if (cmd == FDDEFPRM) {
			/* save settings as permanent default type */

@@ -1702,7 +1706,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
			return -EINVAL;

		UDT = dtp;
		set_capacity(floppy->disk, UDT->blocks);
		set_capacity(disk, UDT->blocks);

		return 0;
	case FDMSGON:

@@ -1725,7 +1729,7 @@ static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode,
		UDT = NULL;
		/* MSch: invalidate default_params */
		default_params[drive].blocks = 0;
		set_capacity(floppy->disk, MAX_DISK_SIZE * 2);
		set_capacity(disk, MAX_DISK_SIZE * 2);
		fallthrough;
	case FDFMTEND:
	case FDFLUSH:
@@ -1962,14 +1966,50 @@ static const struct blk_mq_ops ataflop_mq_ops = {
	.commit_rqs = ataflop_commit_rqs,
};

static struct kobject *floppy_find(dev_t dev, int *part, void *data)
static int ataflop_alloc_disk(unsigned int drive, unsigned int type)
{
	int drive = *part & 3;
	int type = *part >> 2;
	struct gendisk *disk;
	int ret;

	disk = alloc_disk(1);
	if (!disk)
		return -ENOMEM;

	disk->queue = blk_mq_init_queue(&unit[drive].tag_set);
	if (IS_ERR(disk->queue)) {
		ret = PTR_ERR(disk->queue);
		disk->queue = NULL;
		put_disk(disk);
		return ret;
	}

	disk->major = FLOPPY_MAJOR;
	disk->first_minor = drive + (type << 2);
	sprintf(disk->disk_name, "fd%d", drive);
	disk->fops = &floppy_fops;
	disk->events = DISK_EVENT_MEDIA_CHANGE;
	disk->private_data = &unit[drive];
	set_capacity(disk, MAX_DISK_SIZE * 2);

	unit[drive].disk[type] = disk;
	return 0;
}

static DEFINE_MUTEX(ataflop_probe_lock);

static void ataflop_probe(dev_t dev)
{
	int drive = MINOR(dev) & 3;
	int type = MINOR(dev) >> 2;

	if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
		return NULL;
	*part = 0;
	return get_disk_and_module(unit[drive].disk);
		return;
	mutex_lock(&ataflop_probe_lock);
	if (!unit[drive].disk[type]) {
		if (ataflop_alloc_disk(drive, type) == 0)
			add_disk(unit[drive].disk[type]);
	}
	mutex_unlock(&ataflop_probe_lock);
}
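ataflop_probe() illustrates the new registration model in this series: __register_blkdev() takes a probe callback that runs on first lookup of an unclaimed minor, and the callback creates and add_disk()s the gendisk itself instead of returning a ref-counted kobject. Reduced to its skeleton, using the identifiers from the code above:

static void example_probe(dev_t dev)
{
	unsigned int drive = MINOR(dev) & 3;
	unsigned int type = MINOR(dev) >> 2;

	if (drive >= FD_MAX_UNITS || type > NUM_DISK_MINORS)
		return;

	mutex_lock(&ataflop_probe_lock);
	if (!unit[drive].disk[type] &&
	    ataflop_alloc_disk(drive, type) == 0)
		add_disk(unit[drive].disk[type]);
	mutex_unlock(&ataflop_probe_lock);
}

Compare the removed floppy_find() above, which had to return a kobject and relied on blk_register_region().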
static int __init atari_floppy_init (void)
@@ -1981,23 +2021,26 @@ static int __init atari_floppy_init (void)
	/* Amiga, Mac, ... don't have Atari-compatible floppy :-) */
	return -ENODEV;

	if (register_blkdev(FLOPPY_MAJOR,"fd"))
		return -EBUSY;
	mutex_lock(&ataflop_probe_lock);
	ret = __register_blkdev(FLOPPY_MAJOR, "fd", ataflop_probe);
	if (ret)
		goto out_unlock;

	for (i = 0; i < FD_MAX_UNITS; i++) {
		unit[i].disk = alloc_disk(1);
		if (!unit[i].disk) {
			ret = -ENOMEM;
		memset(&unit[i].tag_set, 0, sizeof(unit[i].tag_set));
		unit[i].tag_set.ops = &ataflop_mq_ops;
		unit[i].tag_set.nr_hw_queues = 1;
		unit[i].tag_set.nr_maps = 1;
		unit[i].tag_set.queue_depth = 2;
		unit[i].tag_set.numa_node = NUMA_NO_NODE;
		unit[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
		ret = blk_mq_alloc_tag_set(&unit[i].tag_set);
		if (ret)
			goto err;
		}

		unit[i].disk->queue = blk_mq_init_sq_queue(&unit[i].tag_set,
							   &ataflop_mq_ops, 2,
							   BLK_MQ_F_SHOULD_MERGE);
		if (IS_ERR(unit[i].disk->queue)) {
			put_disk(unit[i].disk);
			ret = PTR_ERR(unit[i].disk->queue);
			unit[i].disk->queue = NULL;
		ret = ataflop_alloc_disk(i, 0);
		if (ret) {
			blk_mq_free_tag_set(&unit[i].tag_set);
			goto err;
		}
	}

@@ -2027,19 +2070,9 @@ static int __init atari_floppy_init (void)
	for (i = 0; i < FD_MAX_UNITS; i++) {
		unit[i].track = -1;
		unit[i].flags = 0;
		unit[i].disk->major = FLOPPY_MAJOR;
		unit[i].disk->first_minor = i;
		sprintf(unit[i].disk->disk_name, "fd%d", i);
		unit[i].disk->fops = &floppy_fops;
		unit[i].disk->events = DISK_EVENT_MEDIA_CHANGE;
		unit[i].disk->private_data = &unit[i];
		set_capacity(unit[i].disk, MAX_DISK_SIZE * 2);
		add_disk(unit[i].disk);
		add_disk(unit[i].disk[0]);
	}

	blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
			    floppy_find, NULL, NULL);

	printk(KERN_INFO "Atari floppy driver: max. %cD, %strack buffering\n",
	       DriveType == 0 ? 'D' : DriveType == 1 ? 'H' : 'E',
	       UseTrackbuffer ? "" : "no ");

@@ -2049,14 +2082,14 @@ static int __init atari_floppy_init (void)

err:
	while (--i >= 0) {
		struct gendisk *disk = unit[i].disk;

		blk_cleanup_queue(disk->queue);
		blk_cleanup_queue(unit[i].disk[0]->queue);
		put_disk(unit[i].disk[0]);
		blk_mq_free_tag_set(&unit[i].tag_set);
		put_disk(unit[i].disk);
	}

	unregister_blkdev(FLOPPY_MAJOR, "fd");
out_unlock:
	mutex_unlock(&ataflop_probe_lock);
	return ret;
}

@@ -2101,13 +2134,17 @@ __setup("floppy=", atari_floppy_setup);

static void __exit atari_floppy_exit(void)
{
	int i;
	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
	int i, type;

	for (i = 0; i < FD_MAX_UNITS; i++) {
		del_gendisk(unit[i].disk);
		blk_cleanup_queue(unit[i].disk->queue);
		for (type = 0; type < NUM_DISK_MINORS; type++) {
			if (!unit[i].disk[type])
				continue;
			del_gendisk(unit[i].disk[type]);
			blk_cleanup_queue(unit[i].disk[type]->queue);
			put_disk(unit[i].disk[type]);
		}
		blk_mq_free_tag_set(&unit[i].tag_set);
		put_disk(unit[i].disk);
	}
	unregister_blkdev(FLOPPY_MAJOR, "fd");
@@ -426,14 +426,15 @@ static void brd_free(struct brd_device *brd)
	kfree(brd);
}

static struct brd_device *brd_init_one(int i, bool *new)
static void brd_probe(dev_t dev)
{
	struct brd_device *brd;
	int i = MINOR(dev) / max_part;

	*new = false;
	mutex_lock(&brd_devices_mutex);
	list_for_each_entry(brd, &brd_devices, brd_list) {
		if (brd->brd_number == i)
			goto out;
			goto out_unlock;
	}

	brd = brd_alloc(i);

@@ -442,9 +443,9 @@ static struct brd_device *brd_init_one(int i, bool *new)
		add_disk(brd->brd_disk);
		list_add_tail(&brd->brd_list, &brd_devices);
	}
	*new = true;
out:
	return brd;

out_unlock:
	mutex_unlock(&brd_devices_mutex);
}

static void brd_del_one(struct brd_device *brd)

@@ -454,23 +455,6 @@ static void brd_del_one(struct brd_device *brd)
	brd_free(brd);
}

static struct kobject *brd_probe(dev_t dev, int *part, void *data)
{
	struct brd_device *brd;
	struct kobject *kobj;
	bool new;

	mutex_lock(&brd_devices_mutex);
	brd = brd_init_one(MINOR(dev) / max_part, &new);
	kobj = brd ? get_disk_and_module(brd->brd_disk) : NULL;
	mutex_unlock(&brd_devices_mutex);

	if (new)
		*part = 0;

	return kobj;
}

static inline void brd_check_and_reset_par(void)
{
	if (unlikely(!max_part))

@@ -510,11 +494,12 @@ static int __init brd_init(void)
	 * dynamically.
	 */

	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
	if (__register_blkdev(RAMDISK_MAJOR, "ramdisk", brd_probe))
		return -EIO;

	brd_check_and_reset_par();

	mutex_lock(&brd_devices_mutex);
	for (i = 0; i < rd_nr; i++) {
		brd = brd_alloc(i);
		if (!brd)

@@ -532,9 +517,7 @@ static int __init brd_init(void)
		brd->brd_disk->queue = brd->brd_queue;
		add_disk(brd->brd_disk);
	}

	blk_register_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS,
			    THIS_MODULE, brd_probe, NULL, NULL);
	mutex_unlock(&brd_devices_mutex);

	pr_info("brd: module loaded\n");
	return 0;

@@ -544,6 +527,7 @@ static int __init brd_init(void)
		list_del(&brd->brd_list);
		brd_free(brd);
	}
	mutex_unlock(&brd_devices_mutex);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module NOT loaded !!!\n");

@@ -557,7 +541,6 @@ static void __exit brd_exit(void)
	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
		brd_del_one(brd);

	blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), 1UL << MINORBITS);
	unregister_blkdev(RAMDISK_MAJOR, "ramdisk");

	pr_info("brd: module unloaded\n");
@@ -2036,8 +2036,7 @@ void drbd_set_my_capacity(struct drbd_device *device, sector_t size)
{
	char ppb[10];

	set_capacity(device->vdisk, size);
	revalidate_disk_size(device->vdisk, false);
	set_capacity_and_notify(device->vdisk, size);

	drbd_info(device, "size = %s (%llu KB)\n",
		ppsize(ppb, size>>1), (unsigned long long)size>>1);

@@ -2068,8 +2067,7 @@ void drbd_device_cleanup(struct drbd_device *device)
	}
	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);

	set_capacity(device->vdisk, 0);
	revalidate_disk_size(device->vdisk, false);
	set_capacity_and_notify(device->vdisk, 0);
	if (device->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(device, 0, 1);

@@ -2802,7 +2802,7 @@ bool drbd_rs_c_min_rate_throttle(struct drbd_device *device)
	if (c_min_rate == 0)
		return false;

	curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
	curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
			atomic_read(&device->rs_sect_ev);

	if (atomic_read(&device->ap_actlog_cnt)

@@ -1678,7 +1678,8 @@ void drbd_rs_controller_reset(struct drbd_device *device)
	atomic_set(&device->rs_sect_in, 0);
	atomic_set(&device->rs_sect_ev, 0);
	device->rs_in_flight = 0;
	device->rs_last_events = (int)part_stat_read_accum(&disk->part0, sectors);
	device->rs_last_events =
		(int)part_stat_read_accum(disk->part0, sectors);

	/* Updating the RCU protected object in place is necessary since
	   this function gets called from atomic context.
@@ -402,7 +402,6 @@ static struct floppy_drive_params drive_params[N_DRIVE];
static struct floppy_drive_struct drive_state[N_DRIVE];
static struct floppy_write_errors write_errors[N_DRIVE];
static struct timer_list motor_off_timer[N_DRIVE];
static struct gendisk *disks[N_DRIVE];
static struct blk_mq_tag_set tag_sets[N_DRIVE];
static struct block_device *opened_bdev[N_DRIVE];
static DEFINE_MUTEX(open_lock);

@@ -477,6 +476,8 @@ static struct floppy_struct floppy_type[32] = {
	{ 3200,20,2,80,0,0x1C,0x00,0xCF,0x2C,"H1600" }, /* 31 1.6MB 3.5" */
};

static struct gendisk *disks[N_DRIVE][ARRAY_SIZE(floppy_type)];

#define SECTSIZE (_FD_SECTSIZE(*floppy))

/* Auto-detection: Disk type used until the next media change occurs. */
@@ -4111,7 +4112,7 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)

	new_dev = MINOR(bdev->bd_dev);
	drive_state[drive].fd_device = new_dev;
	set_capacity(disks[drive], floppy_sizes[new_dev]);
	set_capacity(disks[drive][ITYPE(new_dev)], floppy_sizes[new_dev]);
	if (old_dev != -1 && old_dev != new_dev) {
		if (buffer_drive == drive)
			buffer_track = -1;

@@ -4579,15 +4580,58 @@ static bool floppy_available(int drive)
	return true;
}

static struct kobject *floppy_find(dev_t dev, int *part, void *data)
static int floppy_alloc_disk(unsigned int drive, unsigned int type)
{
	int drive = (*part & 3) | ((*part & 0x80) >> 5);
	if (drive >= N_DRIVE || !floppy_available(drive))
		return NULL;
	if (((*part >> 2) & 0x1f) >= ARRAY_SIZE(floppy_type))
		return NULL;
	*part = 0;
	return get_disk_and_module(disks[drive]);
	struct gendisk *disk;
	int err;

	disk = alloc_disk(1);
	if (!disk)
		return -ENOMEM;

	disk->queue = blk_mq_init_queue(&tag_sets[drive]);
	if (IS_ERR(disk->queue)) {
		err = PTR_ERR(disk->queue);
		disk->queue = NULL;
		put_disk(disk);
		return err;
	}

	blk_queue_bounce_limit(disk->queue, BLK_BOUNCE_HIGH);
	blk_queue_max_hw_sectors(disk->queue, 64);
	disk->major = FLOPPY_MAJOR;
	disk->first_minor = TOMINOR(drive) | (type << 2);
	disk->fops = &floppy_fops;
	disk->events = DISK_EVENT_MEDIA_CHANGE;
	if (type)
		sprintf(disk->disk_name, "fd%d_type%d", drive, type);
	else
		sprintf(disk->disk_name, "fd%d", drive);
	/* to be cleaned up... */
	disk->private_data = (void *)(long)drive;
	disk->flags |= GENHD_FL_REMOVABLE;

	disks[drive][type] = disk;
	return 0;
}

static DEFINE_MUTEX(floppy_probe_lock);

static void floppy_probe(dev_t dev)
{
	unsigned int drive = (MINOR(dev) & 3) | ((MINOR(dev) & 0x80) >> 5);
	unsigned int type = (MINOR(dev) >> 2) & 0x1f;

	if (drive >= N_DRIVE || !floppy_available(drive) ||
	    type >= ARRAY_SIZE(floppy_type))
		return;

	mutex_lock(&floppy_probe_lock);
	if (!disks[drive][type]) {
		if (floppy_alloc_disk(drive, type) == 0)
			add_disk(disks[drive][type]);
	}
	mutex_unlock(&floppy_probe_lock);
}
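floppy_probe() decodes the classic PC floppy minor layout: bits 0-1 plus bit 7 select the drive, bits 2-6 the format type. A worked example as a small host program (plain C, runnable anywhere):

#include <stdio.h>

int main(void)
{
	unsigned int minor = 0x84;	/* example minor number, 132 */
	unsigned int drive = (minor & 3) | ((minor & 0x80) >> 5);
	unsigned int type = (minor >> 2) & 0x1f;

	/* prints: minor 132 -> drive 4, type 1 */
	printf("minor %u -> drive %u, type %u\n", minor, drive, type);
	return 0;
}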
static int __init do_floppy_init(void)
@@ -4609,33 +4653,25 @@ static int __init do_floppy_init(void)
	return -ENOMEM;

	for (drive = 0; drive < N_DRIVE; drive++) {
		disks[drive] = alloc_disk(1);
		if (!disks[drive]) {
			err = -ENOMEM;
		memset(&tag_sets[drive], 0, sizeof(tag_sets[drive]));
		tag_sets[drive].ops = &floppy_mq_ops;
		tag_sets[drive].nr_hw_queues = 1;
		tag_sets[drive].nr_maps = 1;
		tag_sets[drive].queue_depth = 2;
		tag_sets[drive].numa_node = NUMA_NO_NODE;
		tag_sets[drive].flags = BLK_MQ_F_SHOULD_MERGE;
		err = blk_mq_alloc_tag_set(&tag_sets[drive]);
		if (err)
			goto out_put_disk;
		}

		disks[drive]->queue = blk_mq_init_sq_queue(&tag_sets[drive],
							   &floppy_mq_ops, 2,
							   BLK_MQ_F_SHOULD_MERGE);
		if (IS_ERR(disks[drive]->queue)) {
			err = PTR_ERR(disks[drive]->queue);
			disks[drive]->queue = NULL;
		err = floppy_alloc_disk(drive, 0);
		if (err)
			goto out_put_disk;
		}

		blk_queue_bounce_limit(disks[drive]->queue, BLK_BOUNCE_HIGH);
		blk_queue_max_hw_sectors(disks[drive]->queue, 64);
		disks[drive]->major = FLOPPY_MAJOR;
		disks[drive]->first_minor = TOMINOR(drive);
		disks[drive]->fops = &floppy_fops;
		disks[drive]->events = DISK_EVENT_MEDIA_CHANGE;
		sprintf(disks[drive]->disk_name, "fd%d", drive);

		timer_setup(&motor_off_timer[drive], motor_off_callback, 0);
	}

	err = register_blkdev(FLOPPY_MAJOR, "fd");
	err = __register_blkdev(FLOPPY_MAJOR, "fd", floppy_probe);
	if (err)
		goto out_put_disk;

@@ -4643,9 +4679,6 @@ static int __init do_floppy_init(void)
	if (err)
		goto out_unreg_blkdev;

	blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
			    floppy_find, NULL, NULL);

	for (i = 0; i < 256; i++)
		if (ITYPE(i))
			floppy_sizes[i] = floppy_type[ITYPE(i)].size;

@@ -4673,7 +4706,7 @@ static int __init do_floppy_init(void)
	if (fdc_state[0].address == -1) {
		cancel_delayed_work(&fd_timeout);
		err = -ENODEV;
		goto out_unreg_region;
		goto out_unreg_driver;
	}
#if N_FDC > 1
	fdc_state[1].address = FDC2;

@@ -4684,7 +4717,7 @@ static int __init do_floppy_init(void)
	if (err) {
		cancel_delayed_work(&fd_timeout);
		err = -EBUSY;
		goto out_unreg_region;
		goto out_unreg_driver;
	}

	/* initialise drive state */

@@ -4761,10 +4794,8 @@ static int __init do_floppy_init(void)
		if (err)
			goto out_remove_drives;

		/* to be cleaned up... */
		disks[drive]->private_data = (void *)(long)drive;
		disks[drive]->flags |= GENHD_FL_REMOVABLE;
		device_add_disk(&floppy_device[drive].dev, disks[drive], NULL);
		device_add_disk(&floppy_device[drive].dev, disks[drive][0],
				NULL);
	}

	return 0;

@@ -4772,30 +4803,27 @@ static int __init do_floppy_init(void)
out_remove_drives:
	while (drive--) {
		if (floppy_available(drive)) {
			del_gendisk(disks[drive]);
			del_gendisk(disks[drive][0]);
			platform_device_unregister(&floppy_device[drive]);
		}
	}
out_release_dma:
	if (atomic_read(&usage_count))
		floppy_release_irq_and_dma();
out_unreg_region:
	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
out_unreg_driver:
	platform_driver_unregister(&floppy_driver);
out_unreg_blkdev:
	unregister_blkdev(FLOPPY_MAJOR, "fd");
out_put_disk:
	destroy_workqueue(floppy_wq);
	for (drive = 0; drive < N_DRIVE; drive++) {
		if (!disks[drive])
		if (!disks[drive][0])
			break;
		if (disks[drive]->queue) {
			del_timer_sync(&motor_off_timer[drive]);
			blk_cleanup_queue(disks[drive]->queue);
			disks[drive]->queue = NULL;
			blk_mq_free_tag_set(&tag_sets[drive]);
		}
		put_disk(disks[drive]);
		del_timer_sync(&motor_off_timer[drive]);
		blk_cleanup_queue(disks[drive][0]->queue);
		disks[drive][0]->queue = NULL;
		blk_mq_free_tag_set(&tag_sets[drive]);
		put_disk(disks[drive][0]);
	}
	return err;
}

@@ -5006,9 +5034,8 @@ module_init(floppy_module_init);

static void __exit floppy_module_exit(void)
{
	int drive;
	int drive, i;

	blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);
	unregister_blkdev(FLOPPY_MAJOR, "fd");
	platform_driver_unregister(&floppy_driver);

@@ -5018,10 +5045,16 @@ static void __exit floppy_module_exit(void)
		del_timer_sync(&motor_off_timer[drive]);

		if (floppy_available(drive)) {
			del_gendisk(disks[drive]);
			for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
				if (disks[drive][i])
					del_gendisk(disks[drive][i]);
			}
			platform_device_unregister(&floppy_device[drive]);
		}
		blk_cleanup_queue(disks[drive]->queue);
		for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
			if (disks[drive][i])
				blk_cleanup_queue(disks[drive][i]->queue);
		}
		blk_mq_free_tag_set(&tag_sets[drive]);

		/*

@@ -5029,10 +5062,17 @@ static void __exit floppy_module_exit(void)
		 * queue reference in put_disk().
		 */
		if (!(allowed_drive_mask & (1 << drive)) ||
		    fdc_state[FDC(drive)].version == FDC_NONE)
			disks[drive]->queue = NULL;
		    fdc_state[FDC(drive)].version == FDC_NONE) {
			for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
				if (disks[drive][i])
					disks[drive][i]->queue = NULL;
			}
		}

		put_disk(disks[drive]);
		for (i = 0; i < ARRAY_SIZE(floppy_type); i++) {
			if (disks[drive][i])
				put_disk(disks[drive][i]);
		}
	}

	cancel_delayed_work_sync(&fd_timeout);
@@ -251,12 +251,8 @@ loop_validate_block_size(unsigned short bsize)
 */
static void loop_set_size(struct loop_device *lo, loff_t size)
{
	struct block_device *bdev = lo->lo_device;

	bd_set_nr_sectors(bdev, size);

	if (!set_capacity_revalidate_and_notify(lo->lo_disk, size, false))
		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	if (!set_capacity_and_notify(lo->lo_disk, size))
		kobject_uevent(&disk_to_dev(lo->lo_disk)->kobj, KOBJ_CHANGE);
}

static inline int

@@ -679,10 +675,10 @@ static int loop_validate_file(struct file *file, struct block_device *bdev)
	while (is_loop_device(f)) {
		struct loop_device *l;

		if (f->f_mapping->host->i_bdev == bdev)
		if (f->f_mapping->host->i_rdev == bdev->bd_dev)
			return -EBADF;

		l = f->f_mapping->host->i_bdev->bd_disk->private_data;
		l = I_BDEV(f->f_mapping->host)->bd_disk->private_data;
		if (l->lo_state != Lo_bound) {
			return -EINVAL;
		}

@@ -889,9 +885,7 @@ static void loop_config_discard(struct loop_device *lo)
	 * file-backed loop devices: discarded regions read back as zero.
	 */
	if (S_ISBLK(inode->i_mode) && !lo->lo_encrypt_key_size) {
		struct request_queue *backingq;

		backingq = bdev_get_queue(inode->i_bdev);
		struct request_queue *backingq = bdev_get_queue(I_BDEV(inode));

		max_discard_sectors = backingq->limits.max_write_zeroes_sectors;
		granularity = backingq->limits.discard_granularity ?:

@@ -1075,7 +1069,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
	struct file *file;
	struct inode *inode;
	struct address_space *mapping;
	struct block_device *claimed_bdev = NULL;
	int error;
	loff_t size;
	bool partscan;

@@ -1094,8 +1087,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
	 * here to avoid changing device under exclusive owner.
	 */
	if (!(mode & FMODE_EXCL)) {
		claimed_bdev = bdev->bd_contains;
		error = bd_prepare_to_claim(bdev, claimed_bdev, loop_configure);
		error = bd_prepare_to_claim(bdev, loop_configure);
		if (error)
			goto out_putf;
	}

@@ -1138,7 +1130,7 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
	if (error)
		goto out_unlock;

	set_device_ro(bdev, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);
	set_disk_ro(lo->lo_disk, (lo->lo_flags & LO_FLAGS_READ_ONLY) != 0);

	lo->use_dio = lo->lo_flags & LO_FLAGS_DIRECT_IO;
	lo->lo_device = bdev;

@@ -1168,9 +1160,6 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
	size = get_loop_size(lo, file);
	loop_set_size(lo, size);

	set_blocksize(bdev, S_ISBLK(inode->i_mode) ?
		      block_size(inode->i_bdev) : PAGE_SIZE);

	lo->lo_state = Lo_bound;
	if (part_shift)
		lo->lo_flags |= LO_FLAGS_PARTSCAN;

@@ -1185,15 +1174,15 @@ static int loop_configure(struct loop_device *lo, fmode_t mode,
	mutex_unlock(&loop_ctl_mutex);
	if (partscan)
		loop_reread_partitions(lo, bdev);
	if (claimed_bdev)
		bd_abort_claiming(bdev, claimed_bdev, loop_configure);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, loop_configure);
	return 0;

out_unlock:
	mutex_unlock(&loop_ctl_mutex);
out_bdev:
	if (claimed_bdev)
		bd_abort_claiming(bdev, claimed_bdev, loop_configure);
	if (!(mode & FMODE_EXCL))
		bd_abort_claiming(bdev, loop_configure);
out_putf:
	fput(file);
out:
@@ -1252,7 +1241,6 @@ static int __loop_clr_fd(struct loop_device *lo, bool release)
	set_capacity(lo->lo_disk, 0);
	loop_sysfs_exit(lo);
	if (bdev) {
		bd_set_nr_sectors(bdev, 0);
		/* let user-space know about this change */
		kobject_uevent(&disk_to_dev(bdev->bd_disk)->kobj, KOBJ_CHANGE);
	}

@@ -2235,24 +2223,18 @@ static int loop_lookup(struct loop_device **l, int i)
	return ret;
}

static struct kobject *loop_probe(dev_t dev, int *part, void *data)
static void loop_probe(dev_t dev)
{
	int idx = MINOR(dev) >> part_shift;
	struct loop_device *lo;
	struct kobject *kobj;
	int err;

	if (max_loop && idx >= max_loop)
		return;

	mutex_lock(&loop_ctl_mutex);
	err = loop_lookup(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		err = loop_add(&lo, MINOR(dev) >> part_shift);
	if (err < 0)
		kobj = NULL;
	else
		kobj = get_disk_and_module(lo->lo_disk);
	if (loop_lookup(&lo, idx) < 0)
		loop_add(&lo, idx);
	mutex_unlock(&loop_ctl_mutex);

	*part = 0;
	return kobj;
}

static long loop_control_ioctl(struct file *file, unsigned int cmd,

@@ -2372,14 +2354,11 @@ static int __init loop_init(void)
	goto err_out;

	if (register_blkdev(LOOP_MAJOR, "loop")) {
	if (__register_blkdev(LOOP_MAJOR, "loop", loop_probe)) {
		err = -EIO;
		goto misc_out;
	}

	blk_register_region(MKDEV(LOOP_MAJOR, 0), range,
			    THIS_MODULE, loop_probe, NULL, NULL);

	/* pre-create number of devices given by config or max_loop */
	mutex_lock(&loop_ctl_mutex);
	for (i = 0; i < nr; i++)

@@ -2405,16 +2384,11 @@ static int loop_exit_cb(int id, void *ptr, void *data)

static void __exit loop_exit(void)
{
	unsigned long range;

	range = max_loop ? max_loop << part_shift : 1UL << MINORBITS;

	mutex_lock(&loop_ctl_mutex);

	idr_for_each(&loop_index_idr, &loop_exit_cb, NULL);
	idr_destroy(&loop_index_idr);

	blk_unregister_region(MKDEV(LOOP_MAJOR, 0), range);
	unregister_blkdev(LOOP_MAJOR, "loop");

	misc_deregister(&loop_misc);
@@ -3687,7 +3687,6 @@ static int mtip_block_initialize(struct driver_data *dd)
	/* Enable the block device and add it to /dev */
	device_add_disk(&dd->pdev->dev, dd->disk, NULL);

	dd->bdev = bdget_disk(dd->disk, 0);
	/*
	 * Now that the disk is active, initialize any sysfs attributes
	 * managed by the protocol layer.

@@ -3721,9 +3720,6 @@ static int mtip_block_initialize(struct driver_data *dd)
	return rv;

kthread_run_error:
	bdput(dd->bdev);
	dd->bdev = NULL;

	/* Delete our gendisk. This also removes the device from /dev */
	del_gendisk(dd->disk);

@@ -3804,14 +3800,6 @@ static int mtip_block_remove(struct driver_data *dd)
	blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd);
	blk_mq_unquiesce_queue(dd->queue);

	/*
	 * Delete our gendisk structure. This also removes the device
	 * from /dev
	 */
	if (dd->bdev) {
		bdput(dd->bdev);
		dd->bdev = NULL;
	}
	if (dd->disk) {
		if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag))
			del_gendisk(dd->disk);

@@ -4206,9 +4194,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
	} while (atomic_read(&dd->irq_workers_active) != 0 &&
		time_before(jiffies, to));

	if (!dd->sr)
		fsync_bdev(dd->bdev);

	if (atomic_read(&dd->irq_workers_active) != 0) {
		dev_warn(&dd->pdev->dev,
			"Completion workers still active!\n");

@@ -463,8 +463,6 @@ struct driver_data {

	int isr_binding;

	struct block_device *bdev;

	struct list_head online_list; /* linkage for online list */

	struct list_head remove_list; /* linkage for removing list */
@@ -296,40 +296,32 @@ static void nbd_size_clear(struct nbd_device *nbd)
	}
}

static void nbd_size_update(struct nbd_device *nbd, bool start)
static int nbd_set_size(struct nbd_device *nbd, loff_t bytesize,
		loff_t blksize)
{
	struct nbd_config *config = nbd->config;
	struct block_device *bdev = bdget_disk(nbd->disk, 0);
	sector_t nr_sectors = config->bytesize >> 9;
	if (!blksize)
		blksize = NBD_DEF_BLKSIZE;
	if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
		return -EINVAL;

	if (config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = config->blksize;
		nbd->disk->queue->limits.discard_alignment = config->blksize;
	nbd->config->bytesize = bytesize;
	nbd->config->blksize = blksize;

	if (!nbd->task_recv)
		return 0;

	if (nbd->config->flags & NBD_FLAG_SEND_TRIM) {
		nbd->disk->queue->limits.discard_granularity = blksize;
		nbd->disk->queue->limits.discard_alignment = blksize;
		blk_queue_max_discard_sectors(nbd->disk->queue, UINT_MAX);
	}
	blk_queue_logical_block_size(nbd->disk->queue, config->blksize);
	blk_queue_physical_block_size(nbd->disk->queue, config->blksize);
	set_capacity(nbd->disk, nr_sectors);
	if (bdev) {
		if (bdev->bd_disk) {
			bd_set_nr_sectors(bdev, nr_sectors);
			if (start)
				set_blocksize(bdev, config->blksize);
		} else
			set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
		bdput(bdev);
	}
	kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
}
	blk_queue_logical_block_size(nbd->disk->queue, blksize);
	blk_queue_physical_block_size(nbd->disk->queue, blksize);

static void nbd_size_set(struct nbd_device *nbd, loff_t blocksize,
		loff_t nr_blocks)
{
	struct nbd_config *config = nbd->config;
	config->blksize = blocksize;
	config->bytesize = blocksize * nr_blocks;
	if (nbd->task_recv != NULL)
		nbd_size_update(nbd, false);
	set_bit(GD_NEED_PART_SCAN, &nbd->disk->state);
	if (!set_capacity_and_notify(nbd->disk, bytesize >> 9))
		kobject_uevent(&nbd_to_dev(nbd)->kobj, KOBJ_CHANGE);
	return 0;
}
||||
static void nbd_complete_rq(struct request *req)
|
||||
|
@ -1140,7 +1132,7 @@ static void nbd_bdev_reset(struct block_device *bdev)
|
|||
{
|
||||
if (bdev->bd_openers > 1)
|
||||
return;
|
||||
bd_set_nr_sectors(bdev, 0);
|
||||
set_capacity(bdev->bd_disk, 0);
|
||||
}
|
||||
|
||||
static void nbd_parse_flags(struct nbd_device *nbd)
|
||||
|
@ -1309,8 +1301,7 @@ static int nbd_start_device(struct nbd_device *nbd)
|
|||
args->index = i;
|
||||
queue_work(nbd->recv_workq, &args->work);
|
||||
}
|
||||
nbd_size_update(nbd, true);
|
||||
return error;
|
||||
return nbd_set_size(nbd, config->bytesize, config->blksize);
|
||||
}
|
||||
|
||||
static int nbd_start_device_ioctl(struct nbd_device *nbd, struct block_device *bdev)
|
||||
|
@ -1352,14 +1343,6 @@ static void nbd_clear_sock_ioctl(struct nbd_device *nbd,
|
|||
nbd_config_put(nbd);
|
||||
}
|
||||
|
||||
static bool nbd_is_valid_blksize(unsigned long blksize)
|
||||
{
|
||||
if (!blksize || !is_power_of_2(blksize) || blksize < 512 ||
|
||||
blksize > PAGE_SIZE)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
|
||||
static void nbd_set_cmd_timeout(struct nbd_device *nbd, u64 timeout)
|
||||
{
|
||||
nbd->tag_set.timeout = timeout * HZ;
|
||||
|
@ -1384,20 +1367,12 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd,
|
|||
case NBD_SET_SOCK:
|
||||
return nbd_add_socket(nbd, arg, false);
|
||||
case NBD_SET_BLKSIZE:
|
||||
if (!arg)
|
||||
arg = NBD_DEF_BLKSIZE;
|
||||
if (!nbd_is_valid_blksize(arg))
|
||||
return -EINVAL;
|
||||
nbd_size_set(nbd, arg,
|
||||
div_s64(config->bytesize, arg));
|
||||
return 0;
|
||||
return nbd_set_size(nbd, config->bytesize, arg);
|
||||
case NBD_SET_SIZE:
|
||||
nbd_size_set(nbd, config->blksize,
|
||||
div_s64(arg, config->blksize));
|
||||
return 0;
|
||||
return nbd_set_size(nbd, arg, config->blksize);
|
||||
case NBD_SET_SIZE_BLOCKS:
|
||||
nbd_size_set(nbd, config->blksize, arg);
|
||||
return 0;
|
||||
return nbd_set_size(nbd, arg * config->blksize,
|
||||
config->blksize);
|
||||
case NBD_SET_TIMEOUT:
|
||||
nbd_set_cmd_timeout(nbd, arg);
|
||||
return 0;
|
||||
|
@ -1513,12 +1488,10 @@ static int nbd_open(struct block_device *bdev, fmode_t mode)
|
|||
static void nbd_release(struct gendisk *disk, fmode_t mode)
|
||||
{
|
||||
struct nbd_device *nbd = disk->private_data;
|
||||
struct block_device *bdev = bdget_disk(disk, 0);
|
||||
|
||||
if (test_bit(NBD_RT_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
|
||||
bdev->bd_openers == 0)
|
||||
disk->part0->bd_openers == 0)
|
||||
nbd_disconnect_and_put(nbd);
|
||||
bdput(bdev);
|
||||
|
||||
nbd_config_put(nbd);
|
||||
nbd_put(nbd);
|
||||
|
@ -1815,18 +1788,11 @@ static int nbd_genl_size_set(struct genl_info *info, struct nbd_device *nbd)
|
|||
if (info->attrs[NBD_ATTR_SIZE_BYTES])
|
||||
bytes = nla_get_u64(info->attrs[NBD_ATTR_SIZE_BYTES]);
|
||||
|
||||
if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]) {
|
||||
if (info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES])
|
||||
bsize = nla_get_u64(info->attrs[NBD_ATTR_BLOCK_SIZE_BYTES]);
|
||||
if (!bsize)
|
||||
bsize = NBD_DEF_BLKSIZE;
|
||||
if (!nbd_is_valid_blksize(bsize)) {
|
||||
printk(KERN_ERR "Invalid block size %llu\n", bsize);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
if (bytes != config->bytesize || bsize != config->blksize)
|
||||
nbd_size_set(nbd, bsize, div64_u64(bytes, bsize));
|
||||
return nbd_set_size(nbd, bytes, bsize);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
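The nbd hunks above collapse nbd_size_update()/nbd_size_set() into one nbd_set_size(). A minimal sketch of the same shape, for a generic driver (the "demo" names are illustrative, not nbd's; the API calls are the real 5.11 ones):

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/log2.h>

/* Validate a block size, program the queue limits, then let
 * set_capacity_and_notify() handle capacity plus the resize uevent;
 * fall back to a plain KOBJ_CHANGE when no uevent was emitted. */
static int demo_set_size(struct gendisk *disk, loff_t bytesize, loff_t blksize)
{
    if (!blksize)
        blksize = 512;
    if (blksize < 512 || blksize > PAGE_SIZE || !is_power_of_2(blksize))
        return -EINVAL;

    blk_queue_logical_block_size(disk->queue, blksize);
    blk_queue_physical_block_size(disk->queue, blksize);

    if (!set_capacity_and_notify(disk, bytesize >> 9))
        kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
    return 0;
}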
drivers/block/pktcdvd.c

@@ -2130,8 +2130,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write)
    }

    set_capacity(pd->disk, lba << 2);
    set_capacity(pd->bdev->bd_disk, lba << 2);
    bd_set_nr_sectors(pd->bdev, lba << 2);
    set_capacity_and_notify(pd->bdev->bd_disk, lba << 2);

    q = bdev_get_queue(pd->bdev);
    if (write) {

@@ -2584,9 +2583,11 @@ static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
    case CDROM_LAST_WRITTEN:
    case CDROM_SEND_PACKET:
    case SCSI_IOCTL_SEND_COMMAND:
        ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg);
        if (!bdev->bd_disk->fops->ioctl)
            ret = -ENOTTY;
        else
            ret = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
        break;

    default:
        pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd);
        ret = -ENOTTY;
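With __blkdev_driver_ioctl() gone from the exported API, callers open-code the dispatch, as the pkt_ioctl() hunk above does. A sketch of the recurring helper shape (names illustrative):

static int demo_forward_ioctl(struct block_device *bdev, fmode_t mode,
        unsigned int cmd, unsigned long arg)
{
    /* forward to the underlying disk's driver, if it has an ioctl op */
    if (!bdev->bd_disk->fops->ioctl)
        return -ENOTTY;
    return bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
}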
drivers/block/rbd.c

@@ -692,12 +692,9 @@ static void rbd_release(struct gendisk *disk, fmode_t mode)
    put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
static int rbd_set_read_only(struct block_device *bdev, bool ro)
{
    int ro;

    if (get_user(ro, (int __user *)arg))
        return -EFAULT;
    struct rbd_device *rbd_dev = bdev->bd_disk->private_data;

    /*
     * Both images mapped read-only and snapshots can't be marked

@@ -710,43 +707,14 @@ static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
        rbd_assert(!rbd_is_snap(rbd_dev));
    }

    /* Let blkdev_roset() handle it */
    return -ENOTTY;
    return 0;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
            unsigned int cmd, unsigned long arg)
{
    struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
    int ret;

    switch (cmd) {
    case BLKROSET:
        ret = rbd_ioctl_set_ro(rbd_dev, arg);
        break;
    default:
        ret = -ENOTTY;
    }

    return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
            unsigned int cmd, unsigned long arg)
{
    return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
    .owner          = THIS_MODULE,
    .open           = rbd_open,
    .release        = rbd_release,
    .ioctl          = rbd_ioctl,
#ifdef CONFIG_COMPAT
    .compat_ioctl   = rbd_compat_ioctl,
#endif
    .set_read_only  = rbd_set_read_only,
};

/*

@@ -4920,8 +4888,7 @@ static void rbd_dev_update_size(struct rbd_device *rbd_dev)
        !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
        size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
        dout("setting size to %llu sectors", (unsigned long long)size);
        set_capacity(rbd_dev->disk, size);
        revalidate_disk_size(rbd_dev->disk, true);
        set_capacity_and_notify(rbd_dev->disk, size);
    }
}
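The rbd conversion above shows the new ->set_read_only() hook: BLKROSET plumbing now lives in the block core, and the driver only accepts or vetoes the change. A hedged sketch of the hook shape; demo_dev_is_snapshot() is a hypothetical stand-in for driver-specific policy:

#include <linux/blkdev.h>

static bool demo_dev_is_snapshot(void *priv)
{
    return false;   /* placeholder policy for the sketch */
}

/* Called by the core's BLKROSET handling; return 0 to allow. */
static int demo_set_read_only(struct block_device *bdev, bool ro)
{
    if (!ro && demo_dev_is_snapshot(bdev->bd_disk->private_data))
        return -EROFS;  /* snapshot-like devices must stay read-only */
    return 0;
}

static const struct block_device_operations demo_bd_ops = {
    .owner          = THIS_MODULE,
    .set_read_only  = demo_set_read_only,
};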
drivers/block/rnbd/rnbd-clt.c

@@ -100,8 +100,7 @@ static int rnbd_clt_change_capacity(struct rnbd_clt_dev *dev,
    rnbd_clt_info(dev, "Device size changed from %zu to %zu sectors\n",
              dev->nsectors, new_nsectors);
    dev->nsectors = new_nsectors;
    set_capacity(dev->gd, dev->nsectors);
    revalidate_disk_size(dev->gd, true);
    set_capacity_and_notify(dev->gd, dev->nsectors);
    return 0;
}
drivers/block/swim.c

@@ -745,18 +745,6 @@ static const struct block_device_operations floppy_fops = {
    .check_events   = floppy_check_events,
};

static struct kobject *floppy_find(dev_t dev, int *part, void *data)
{
    struct swim_priv *swd = data;
    int drive = (*part & 3);

    if (drive >= swd->floppy_count)
        return NULL;

    *part = 0;
    return get_disk_and_module(swd->unit[drive].disk);
}

static int swim_add_floppy(struct swim_priv *swd, enum drive_location location)
{
    struct floppy_state *fs = &swd->unit[swd->floppy_count];

@@ -846,9 +834,6 @@ static int swim_floppy_init(struct swim_priv *swd)
        add_disk(swd->unit[drive].disk);
    }

    blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE,
                floppy_find, NULL, swd);

    return 0;

exit_put_disks:

@@ -932,8 +917,6 @@ static int swim_remove(struct platform_device *dev)
    int drive;
    struct resource *res;

    blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256);

    for (drive = 0; drive < swd->floppy_count; drive++) {
        del_gendisk(swd->unit[drive].disk);
        blk_cleanup_queue(swd->unit[drive].disk->queue);
drivers/block/virtio_blk.c

@@ -470,7 +470,7 @@ static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
           cap_str_10,
           cap_str_2);

    set_capacity_revalidate_and_notify(vblk->disk, capacity, true);
    set_capacity_and_notify(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)

@@ -598,7 +598,6 @@ static void virtblk_update_cache_mode(struct virtio_device *vdev)
    struct virtio_blk *vblk = vdev->priv;

    blk_queue_write_cache(vblk->disk->queue, writeback, false);
    revalidate_disk_size(vblk->disk, true);
}

static const char *const virtblk_cache_types[] = {
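The same replacement recurs across the series: one set_capacity_and_notify() call covers what used to be set_capacity() plus revalidate_disk_size() plus the resize uevent. A tiny sketch of a caller (illustrative names; the return value says whether a uevent went out):

static void demo_update_capacity(struct gendisk *disk, sector_t sectors)
{
    if (!set_capacity_and_notify(disk, sectors))
        pr_debug("%s: capacity unchanged, no resize uevent\n",
             disk->disk_name);
}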
drivers/block/xen-blkback/common.h

@@ -356,9 +356,7 @@ struct pending_req {
};


#define vbd_sz(_v)  ((_v)->bdev->bd_part ? \
             (_v)->bdev->bd_part->nr_sects : \
              get_capacity((_v)->bdev->bd_disk))
#define vbd_sz(_v)  bdev_nr_sectors((_v)->bdev)

#define xen_blkif_get(_b) (atomic_inc(&(_b)->refcnt))
#define xen_blkif_put(_b) \
drivers/block/xen-blkfront.c

@@ -2153,7 +2153,7 @@ static void blkfront_closing(struct blkfront_info *info)
    }

    if (info->gd)
        bdev = bdget_disk(info->gd, 0);
        bdev = bdgrab(info->gd->part0);

    mutex_unlock(&info->mutex);

@@ -2370,7 +2370,7 @@ static void blkfront_connect(struct blkfront_info *info)
            return;
        printk(KERN_INFO "Setting capacity to %Lu\n",
               sectors);
        set_capacity_revalidate_and_notify(info->gd, sectors, true);
        set_capacity_and_notify(info->gd, sectors);

        return;
    case BLKIF_STATE_SUSPENDED:

@@ -2518,7 +2518,7 @@ static int blkfront_remove(struct xenbus_device *xbdev)

    disk = info->gd;
    if (disk)
        bdev = bdget_disk(disk, 0);
        bdev = bdgrab(disk->part0);

    info->xbdev = NULL;
    mutex_unlock(&info->mutex);

@@ -2595,19 +2595,11 @@ static int blkif_open(struct block_device *bdev, fmode_t mode)
static void blkif_release(struct gendisk *disk, fmode_t mode)
{
    struct blkfront_info *info = disk->private_data;
    struct block_device *bdev;
    struct xenbus_device *xbdev;

    mutex_lock(&blkfront_mutex);

    bdev = bdget_disk(disk, 0);

    if (!bdev) {
        WARN(1, "Block device %s yanked out from us!\n", disk->disk_name);
    if (disk->part0->bd_openers)
        goto out_mutex;
    }
    if (bdev->bd_openers)
        goto out;

    /*
     * Check if we have been instructed to close. We will have

@@ -2619,7 +2611,7 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)

    if (xbdev && xbdev->state == XenbusStateClosing) {
        /* pending switch to state closed */
        dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
        dev_info(disk_to_dev(disk), "releasing disk\n");
        xlvbd_release_gendisk(info);
        xenbus_frontend_closed(info->xbdev);
    }

@@ -2628,14 +2620,12 @@ static void blkif_release(struct gendisk *disk, fmode_t mode)

    if (!xbdev) {
        /* sudden device removal */
        dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n");
        dev_info(disk_to_dev(disk), "releasing disk\n");
        xlvbd_release_gendisk(info);
        disk->private_data = NULL;
        free_info(info);
    }

out:
    bdput(bdev);
out_mutex:
    mutex_unlock(&blkfront_mutex);
}
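As the blkfront hunks show, bdget_disk(disk, 0) lookups become a plain reference grab on the always-present whole-disk bdev. Sketch of the idiom (illustrative wrapper; bdgrab() and disk->part0 are the real 5.11 API):

static struct block_device *demo_get_whole_bdev(struct gendisk *disk)
{
    /* part0 always exists for a registered gendisk, so this cannot
     * return NULL the way bdget_disk() could; drop with bdput(). */
    return bdgrab(disk->part0);
}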
drivers/block/z2ram.c

@@ -42,7 +42,6 @@

#include <linux/zorro.h>

#define Z2MINOR_COMBINED (0)
#define Z2MINOR_Z2ONLY (1)
#define Z2MINOR_CHIPONLY (2)

@@ -50,28 +49,28 @@
#define Z2MINOR_MEMLIST2 (5)
#define Z2MINOR_MEMLIST3 (6)
#define Z2MINOR_MEMLIST4 (7)
#define Z2MINOR_COUNT (8) /* Move this down when adding a new minor */

#define Z2RAM_CHUNK1024 ( Z2RAM_CHUNKSIZE >> 10 )

static DEFINE_MUTEX(z2ram_mutex);
static u_long *z2ram_map = NULL;
static u_long z2ram_size = 0;
static int z2_count = 0;
static int chip_count = 0;
static int list_count = 0;
static int current_device = -1;

static DEFINE_SPINLOCK(z2ram_lock);

static struct gendisk *z2ram_gendisk;
static struct gendisk *z2ram_gendisk[Z2MINOR_COUNT];

static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
                const struct blk_mq_queue_data *bd)
{
    struct request *req = bd->rq;
    unsigned long start = blk_rq_pos(req) << 9;
    unsigned long len = blk_rq_cur_bytes(req);

    blk_mq_start_request(req);

@@ -92,7 +91,7 @@ static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,

        if (len < size)
            size = len;
        addr += z2ram_map[start >> Z2RAM_CHUNKSHIFT];
        if (rq_data_dir(req) == READ)
            memcpy(buffer, (char *)addr, size);
        else

@@ -106,323 +105,319 @@ static blk_status_t z2_queue_rq(struct blk_mq_hw_ctx *hctx,
    return BLK_STS_OK;
}

static void get_z2ram(void)
{
    int i;

    for (i = 0; i < Z2RAM_SIZE / Z2RAM_CHUNKSIZE; i++) {
        if (test_bit(i, zorro_unused_z2ram)) {
            z2_count++;
            z2ram_map[z2ram_size++] =
                (unsigned long)ZTWO_VADDR(Z2RAM_START) +
                (i << Z2RAM_CHUNKSHIFT);
            clear_bit(i, zorro_unused_z2ram);
        }
    }

    return;
}

static void get_chipram(void)
{
    while (amiga_chip_avail() > (Z2RAM_CHUNKSIZE * 4)) {
        chip_count++;
        z2ram_map[z2ram_size] =
            (u_long) amiga_chip_alloc(Z2RAM_CHUNKSIZE, "z2ram");

        if (z2ram_map[z2ram_size] == 0) {
            break;
        }

        z2ram_size++;
    }

    return;
}

static int z2_open(struct block_device *bdev, fmode_t mode)
{
    int device;
    int max_z2_map = (Z2RAM_SIZE / Z2RAM_CHUNKSIZE) * sizeof(z2ram_map[0]);
    int max_chip_map = (amiga_chip_size / Z2RAM_CHUNKSIZE) *
        sizeof(z2ram_map[0]);
    int rc = -ENOMEM;

    device = MINOR(bdev->bd_dev);

    mutex_lock(&z2ram_mutex);
    if (current_device != -1 && current_device != device) {
        rc = -EBUSY;
        goto err_out;
    }

    if (current_device == -1) {
        z2_count = 0;
        chip_count = 0;
        list_count = 0;
        z2ram_size = 0;

        /* Use a specific list entry. */
        if (device >= Z2MINOR_MEMLIST1 && device <= Z2MINOR_MEMLIST4) {
            int index = device - Z2MINOR_MEMLIST1 + 1;
            unsigned long size, paddr, vaddr;

            if (index >= m68k_realnum_memory) {
                printk(KERN_ERR DEVICE_NAME
                       ": no such entry in z2ram_map\n");
                goto err_out;
            }

            paddr = m68k_memory[index].addr;
            size = m68k_memory[index].size & ~(Z2RAM_CHUNKSIZE - 1);

#ifdef __powerpc__
            /* FIXME: ioremap doesn't build correct memory tables. */
            {
                vfree(vmalloc(size));
            }

            vaddr = (unsigned long)ioremap_wt(paddr, size);

#else
            vaddr =
                (unsigned long)z_remap_nocache_nonser(paddr, size);
#endif
            z2ram_map =
                kmalloc_array(size / Z2RAM_CHUNKSIZE,
                          sizeof(z2ram_map[0]), GFP_KERNEL);
            if (z2ram_map == NULL) {
                printk(KERN_ERR DEVICE_NAME
                       ": cannot get mem for z2ram_map\n");
                goto err_out;
            }

            while (size) {
                z2ram_map[z2ram_size++] = vaddr;
                size -= Z2RAM_CHUNKSIZE;
                vaddr += Z2RAM_CHUNKSIZE;
                list_count++;
            }

            if (z2ram_size != 0)
                printk(KERN_INFO DEVICE_NAME
                       ": using %iK List Entry %d Memory\n",
                       list_count * Z2RAM_CHUNK1024, index);
        } else
            switch (device) {
            case Z2MINOR_COMBINED:

                z2ram_map =
                    kmalloc(max_z2_map + max_chip_map,
                        GFP_KERNEL);
                if (z2ram_map == NULL) {
                    printk(KERN_ERR DEVICE_NAME
                           ": cannot get mem for z2ram_map\n");
                    goto err_out;
                }

                get_z2ram();
                get_chipram();

                if (z2ram_size != 0)
                    printk(KERN_INFO DEVICE_NAME
                           ": using %iK Zorro II RAM and %iK Chip RAM (Total %dK)\n",
                           z2_count * Z2RAM_CHUNK1024,
                           chip_count * Z2RAM_CHUNK1024,
                           (z2_count +
                        chip_count) * Z2RAM_CHUNK1024);

                break;

            case Z2MINOR_Z2ONLY:
                z2ram_map = kmalloc(max_z2_map, GFP_KERNEL);
                if (z2ram_map == NULL) {
                    printk(KERN_ERR DEVICE_NAME
                           ": cannot get mem for z2ram_map\n");
                    goto err_out;
                }

                get_z2ram();

                if (z2ram_size != 0)
                    printk(KERN_INFO DEVICE_NAME
                           ": using %iK of Zorro II RAM\n",
                           z2_count * Z2RAM_CHUNK1024);

                break;

            case Z2MINOR_CHIPONLY:
                z2ram_map = kmalloc(max_chip_map, GFP_KERNEL);
                if (z2ram_map == NULL) {
                    printk(KERN_ERR DEVICE_NAME
                           ": cannot get mem for z2ram_map\n");
                    goto err_out;
                }

                get_chipram();

                if (z2ram_size != 0)
                    printk(KERN_INFO DEVICE_NAME
                           ": using %iK Chip RAM\n",
                           chip_count * Z2RAM_CHUNK1024);

                break;

            default:
                rc = -ENODEV;
                goto err_out;

                break;
            }

        if (z2ram_size == 0) {
            printk(KERN_NOTICE DEVICE_NAME
                   ": no unused ZII/Chip RAM found\n");
            goto err_out_kfree;
        }

        current_device = device;
        z2ram_size <<= Z2RAM_CHUNKSHIFT;
        set_capacity(z2ram_gendisk, z2ram_size >> 9);
        set_capacity(z2ram_gendisk[device], z2ram_size >> 9);
    }

    mutex_unlock(&z2ram_mutex);
    return 0;

err_out_kfree:
    kfree(z2ram_map);
err_out:
    mutex_unlock(&z2ram_mutex);
    return rc;
}

static void z2_release(struct gendisk *disk, fmode_t mode)
{
    mutex_lock(&z2ram_mutex);
    if (current_device == -1) {
        mutex_unlock(&z2ram_mutex);
        return;
    }
    mutex_unlock(&z2ram_mutex);
    /*
     * FIXME: unmap memory
     */
}

static const struct block_device_operations z2_fops = {
    .owner = THIS_MODULE,
    .open = z2_open,
    .release = z2_release,
};

static struct kobject *z2_find(dev_t dev, int *part, void *data)
{
    *part = 0;
    return get_disk_and_module(z2ram_gendisk);
}

static struct request_queue *z2_queue;
static struct blk_mq_tag_set tag_set;

static const struct blk_mq_ops z2_mq_ops = {
    .queue_rq = z2_queue_rq,
};

static int __init
z2_init(void)
{
    int ret;

    if (!MACH_IS_AMIGA)
        return -ENODEV;

    ret = -EBUSY;
    if (register_blkdev(Z2RAM_MAJOR, DEVICE_NAME))
        goto err;

    ret = -ENOMEM;
    z2ram_gendisk = alloc_disk(1);
    if (!z2ram_gendisk)
        goto out_disk;

    z2_queue = blk_mq_init_sq_queue(&tag_set, &z2_mq_ops, 16,
                    BLK_MQ_F_SHOULD_MERGE);
    if (IS_ERR(z2_queue)) {
        ret = PTR_ERR(z2_queue);
        z2_queue = NULL;
        goto out_queue;
    }

    z2ram_gendisk->major = Z2RAM_MAJOR;
    z2ram_gendisk->first_minor = 0;
    z2ram_gendisk->fops = &z2_fops;
    sprintf(z2ram_gendisk->disk_name, "z2ram");

    z2ram_gendisk->queue = z2_queue;
    add_disk(z2ram_gendisk);
    blk_register_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT, THIS_MODULE,
                z2_find, NULL, NULL);

    return 0;

out_queue:
    put_disk(z2ram_gendisk);
out_disk:
    unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
err:
    return ret;
}

static int z2ram_register_disk(int minor)
{
    struct request_queue *q;
    struct gendisk *disk;

    disk = alloc_disk(1);
    if (!disk)
        return -ENOMEM;

    q = blk_mq_init_queue(&tag_set);
    if (IS_ERR(q)) {
        put_disk(disk);
        return PTR_ERR(q);
    }

    disk->major = Z2RAM_MAJOR;
    disk->first_minor = minor;
    disk->fops = &z2_fops;
    if (minor)
        sprintf(disk->disk_name, "z2ram%d", minor);
    else
        sprintf(disk->disk_name, "z2ram");
    disk->queue = q;

    z2ram_gendisk[minor] = disk;
    add_disk(disk);
    return 0;
}

static int __init z2_init(void)
{
    int ret, i;

    if (!MACH_IS_AMIGA)
        return -ENODEV;

    if (register_blkdev(Z2RAM_MAJOR, DEVICE_NAME))
        return -EBUSY;

    tag_set.ops = &z2_mq_ops;
    tag_set.nr_hw_queues = 1;
    tag_set.nr_maps = 1;
    tag_set.queue_depth = 16;
    tag_set.numa_node = NUMA_NO_NODE;
    tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
    ret = blk_mq_alloc_tag_set(&tag_set);
    if (ret)
        goto out_unregister_blkdev;

    for (i = 0; i < Z2MINOR_COUNT; i++) {
        ret = z2ram_register_disk(i);
        if (ret && i == 0)
            goto out_free_tagset;
    }

    return 0;

out_free_tagset:
    blk_mq_free_tag_set(&tag_set);
out_unregister_blkdev:
    unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
    return ret;
}

static void __exit z2_exit(void)
{
    int i, j;
    blk_unregister_region(MKDEV(Z2RAM_MAJOR, 0), Z2MINOR_COUNT);
    unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);
    del_gendisk(z2ram_gendisk);
    put_disk(z2ram_gendisk);
    blk_cleanup_queue(z2_queue);
    blk_mq_free_tag_set(&tag_set);

    unregister_blkdev(Z2RAM_MAJOR, DEVICE_NAME);

    for (i = 0; i < Z2MINOR_COUNT; i++) {
        del_gendisk(z2ram_gendisk[i]);
        blk_cleanup_queue(z2ram_gendisk[i]->queue);
        put_disk(z2ram_gendisk[i]);
    }
    blk_mq_free_tag_set(&tag_set);

    if (current_device != -1) {
        i = 0;

        for (j = 0; j < z2_count; j++) {
            set_bit(i++, zorro_unused_z2ram);
        }

        for (j = 0; j < chip_count; j++) {
            if (z2ram_map[i]) {
                amiga_chip_free((void *)z2ram_map[i++]);
            }
        }

        if (z2ram_map != NULL) {
            kfree(z2ram_map);
        }
    }

    return;
}

module_init(z2_init);
module_exit(z2_exit);
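The z2ram conversion above trades the single-queue blk_mq_init_sq_queue() helper for an explicitly initialized tag set shared by all per-minor queues. A minimal sketch of that setup (values mirror the z2ram hunk; names illustrative):

#include <linux/blk-mq.h>
#include <linux/numa.h>

static struct blk_mq_tag_set demo_tag_set;

/* One tag set, later shared by several blk_mq_init_queue() calls. */
static int demo_init_tags(const struct blk_mq_ops *ops)
{
    demo_tag_set.ops = ops;
    demo_tag_set.nr_hw_queues = 1;
    demo_tag_set.nr_maps = 1;
    demo_tag_set.queue_depth = 16;
    demo_tag_set.numa_node = NUMA_NO_NODE;
    demo_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
    return blk_mq_alloc_tag_set(&demo_tag_set);
}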
drivers/block/zram/zram_drv.c

@@ -403,13 +403,10 @@ static void reset_bdev(struct zram *zram)
        return;

    bdev = zram->bdev;
    if (zram->old_block_size)
        set_blocksize(bdev, zram->old_block_size);
    blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
    /* hope filp_close flush all of IO */
    filp_close(zram->backing_dev, NULL);
    zram->backing_dev = NULL;
    zram->old_block_size = 0;
    zram->bdev = NULL;
    zram->disk->fops = &zram_devops;
    kvfree(zram->bitmap);

@@ -454,7 +451,7 @@ static ssize_t backing_dev_store(struct device *dev,
    struct file *backing_dev = NULL;
    struct inode *inode;
    struct address_space *mapping;
    unsigned int bitmap_sz, old_block_size = 0;
    unsigned int bitmap_sz;
    unsigned long nr_pages, *bitmap = NULL;
    struct block_device *bdev = NULL;
    int err;

@@ -509,14 +506,8 @@ static ssize_t backing_dev_store(struct device *dev,
        goto out;
    }

    old_block_size = block_size(bdev);
    err = set_blocksize(bdev, PAGE_SIZE);
    if (err)
        goto out;

    reset_bdev(zram);

    zram->old_block_size = old_block_size;
    zram->bdev = bdev;
    zram->backing_dev = backing_dev;
    zram->bitmap = bitmap;

@@ -1710,8 +1701,8 @@ static void zram_reset_device(struct zram *zram)
    disksize = zram->disksize;
    zram->disksize = 0;

    set_capacity(zram->disk, 0);
    part_stat_set_all(&zram->disk->part0, 0);
    set_capacity_and_notify(zram->disk, 0);
    part_stat_set_all(zram->disk->part0, 0);

    up_write(&zram->init_lock);
    /* I/O operation under all of CPU are done so let's free */

@@ -1756,9 +1747,7 @@ static ssize_t disksize_store(struct device *dev,

    zram->comp = comp;
    zram->disksize = disksize;
    set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

    revalidate_disk_size(zram->disk, true);
    set_capacity_and_notify(zram->disk, zram->disksize >> SECTOR_SHIFT);
    up_write(&zram->init_lock);

    return len;

@@ -1786,15 +1775,12 @@ static ssize_t reset_store(struct device *dev,
        return -EINVAL;

    zram = dev_to_zram(dev);
    bdev = bdget_disk(zram->disk, 0);
    if (!bdev)
        return -ENOMEM;
    bdev = zram->disk->part0;

    mutex_lock(&bdev->bd_mutex);
    /* Do not reset an active device or claimed device */
    if (bdev->bd_openers || zram->claim) {
        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
        return -EBUSY;
    }

@@ -1805,8 +1791,6 @@ static ssize_t reset_store(struct device *dev,
    /* Make sure all the pending I/O are finished */
    fsync_bdev(bdev);
    zram_reset_device(zram);
    revalidate_disk_size(zram->disk, true);
    bdput(bdev);

    mutex_lock(&bdev->bd_mutex);
    zram->claim = false;

@@ -1992,16 +1976,11 @@ static int zram_add(void)

static int zram_remove(struct zram *zram)
{
    struct block_device *bdev;

    bdev = bdget_disk(zram->disk, 0);
    if (!bdev)
        return -ENOMEM;
    struct block_device *bdev = zram->disk->part0;

    mutex_lock(&bdev->bd_mutex);
    if (bdev->bd_openers || zram->claim) {
        mutex_unlock(&bdev->bd_mutex);
        bdput(bdev);
        return -EBUSY;
    }

@@ -2013,7 +1992,6 @@ static int zram_remove(struct zram *zram)
    /* Make sure all the pending I/O are finished */
    fsync_bdev(bdev);
    zram_reset_device(zram);
    bdput(bdev);

    pr_info("Removed device: %s\n", zram->disk->disk_name);

drivers/block/zram/zram_drv.h

@@ -119,7 +119,6 @@ struct zram {
    bool wb_limit_enable;
    u64 bd_wb_limit;
    struct block_device *bdev;
    unsigned int old_block_size;
    unsigned long *bitmap;
    unsigned long nr_pages;
#endif
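The zram hunks show the recurring reset pattern once bdget_disk() is gone: part0 is always valid, so the -ENOMEM path and the bdput() disappear. A hedged sketch of the shape (illustrative names; bd_mutex/bd_openers are the real 5.11 fields):

static int demo_reset(struct gendisk *disk)
{
    struct block_device *bdev = disk->part0;

    mutex_lock(&bdev->bd_mutex);
    /* do not reset a device that is still open */
    if (bdev->bd_openers) {
        mutex_unlock(&bdev->bd_mutex);
        return -EBUSY;
    }
    mutex_unlock(&bdev->bd_mutex);

    fsync_bdev(bdev);   /* flush pending I/O before tearing down */
    return 0;
}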
drivers/ide/ide-probe.c

@@ -902,65 +902,14 @@ static int init_irq (ide_hwif_t *hwif)
    return 1;
}

static int ata_lock(dev_t dev, void *data)
static void ata_probe(dev_t dev)
{
    /* FIXME: we want to pin hwif down */
    return 0;
    request_module("ide-disk");
    request_module("ide-cd");
    request_module("ide-tape");
    request_module("ide-floppy");
}

static struct kobject *ata_probe(dev_t dev, int *part, void *data)
{
    ide_hwif_t *hwif = data;
    int unit = *part >> PARTN_BITS;
    ide_drive_t *drive = hwif->devices[unit];

    if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
        return NULL;

    if (drive->media == ide_disk)
        request_module("ide-disk");
    if (drive->media == ide_cdrom || drive->media == ide_optical)
        request_module("ide-cd");
    if (drive->media == ide_tape)
        request_module("ide-tape");
    if (drive->media == ide_floppy)
        request_module("ide-floppy");

    return NULL;
}

static struct kobject *exact_match(dev_t dev, int *part, void *data)
{
    struct gendisk *p = data;
    *part &= (1 << PARTN_BITS) - 1;
    return &disk_to_dev(p)->kobj;
}

static int exact_lock(dev_t dev, void *data)
{
    struct gendisk *p = data;

    if (!get_disk_and_module(p))
        return -1;
    return 0;
}

void ide_register_region(struct gendisk *disk)
{
    blk_register_region(MKDEV(disk->major, disk->first_minor),
                disk->minors, NULL, exact_match, exact_lock, disk);
}

EXPORT_SYMBOL_GPL(ide_register_region);

void ide_unregister_region(struct gendisk *disk)
{
    blk_unregister_region(MKDEV(disk->major, disk->first_minor),
                  disk->minors);
}

EXPORT_SYMBOL_GPL(ide_unregister_region);

void ide_init_disk(struct gendisk *disk, ide_drive_t *drive)
{
    ide_hwif_t *hwif = drive->hwif;

@@ -999,7 +948,7 @@ static int hwif_init(ide_hwif_t *hwif)
        return 0;
    }

    if (register_blkdev(hwif->major, hwif->name))
    if (__register_blkdev(hwif->major, hwif->name, ata_probe))
        return 0;

    if (!hwif->sg_max_nents)

@@ -1021,8 +970,6 @@ static int hwif_init(ide_hwif_t *hwif)
        goto out;
    }

    blk_register_region(MKDEV(hwif->major, 0), MAX_DRIVES << PARTN_BITS,
                THIS_MODULE, ata_probe, ata_lock, hwif);
    return 1;

out:

@@ -1611,7 +1558,6 @@ static void ide_unregister(ide_hwif_t *hwif)
    /*
     * Remove us from the kernel's knowledge
     */
    blk_unregister_region(MKDEV(hwif->major, 0), MAX_DRIVES<<PARTN_BITS);
    kfree(hwif->sg_table);
    unregister_blkdev(hwif->major, hwif->name);

drivers/ide/ide-tape.c

@@ -1822,7 +1822,6 @@ static void ide_tape_remove(ide_drive_t *drive)

    ide_proc_unregister_driver(drive, tape->driver);
    device_del(&tape->dev);
    ide_unregister_region(tape->disk);

    mutex_lock(&idetape_ref_mutex);
    put_device(&tape->dev);

@@ -2026,7 +2025,6 @@ static int ide_tape_probe(ide_drive_t *drive)
         "n%s", tape->name);

    g->fops = &idetape_block_ops;
    ide_register_region(g);

    return 0;
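The ide conversion above is the template for the series-wide removal of blk_register_region(): __register_blkdev() now takes a probe callback that the core invokes when an unclaimed dev_t of that major is opened. Sketch of the registration (DEMO_MAJOR is hypothetical, picked from the 240-254 local/experimental range):

#define DEMO_MAJOR 240  /* hypothetical, local/experimental major */

static void demo_probe(dev_t dev)
{
    /* load whatever driver actually backs this dev_t */
    request_module("demo-driver");
}

static int __init demo_blkdev_init(void)
{
    return __register_blkdev(DEMO_MAJOR, "demo", demo_probe);
}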
drivers/md/bcache/request.c

@@ -475,7 +475,7 @@ struct search {
    unsigned int        read_dirty_data:1;
    unsigned int        cache_missed:1;

    struct hd_struct    *part;
    struct block_device *part;
    unsigned long       start_time;

    struct btree_op     op;

@@ -1073,7 +1073,7 @@ struct detached_dev_io_private {
    unsigned long       start_time;
    bio_end_io_t        *bi_end_io;
    void                *bi_private;
    struct hd_struct    *part;
    struct block_device *part;
};

static void detached_dev_end_io(struct bio *bio)

@@ -1230,8 +1230,9 @@ static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,

    if (dc->io_disable)
        return -EIO;

    return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
    if (!dc->bdev->bd_disk->fops->ioctl)
        return -ENOTTY;
    return dc->bdev->bd_disk->fops->ioctl(dc->bdev, mode, cmd, arg);
}

void bch_cached_dev_request_init(struct cached_dev *dc)
drivers/md/bcache/super.c

@@ -1408,7 +1408,7 @@ static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
            q->limits.raid_partial_stripes_expensive;

    ret = bcache_device_init(&dc->disk, block_size,
            dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
            bdev_nr_sectors(dc->bdev) - dc->sb.data_offset,
            dc->bdev, &bcache_cached_ops);
    if (ret)
        return ret;

@@ -1447,8 +1447,7 @@ static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
        goto err;

    err = "error creating kobject";
    if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
            "bcache"))
    if (kobject_add(&dc->disk.kobj, bdev_kobj(bdev), "bcache"))
        goto err;
    if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
        goto err;

@@ -2342,9 +2341,7 @@ static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
        goto err;
    }

    if (kobject_add(&ca->kobj,
            &part_to_dev(bdev->bd_part)->kobj,
            "bcache")) {
    if (kobject_add(&ca->kobj, bdev_kobj(bdev), "bcache")) {
        err = "error calling kobject_add";
        ret = -ENOMEM;
        goto out;

@@ -2383,38 +2380,38 @@ kobj_attribute_write(register, register_bcache);
kobj_attribute_write(register_quiet, register_bcache);
kobj_attribute_write(pendings_cleanup, bch_pending_bdevs_cleanup);

static bool bch_is_open_backing(struct block_device *bdev)
static bool bch_is_open_backing(dev_t dev)
{
    struct cache_set *c, *tc;
    struct cached_dev *dc, *t;

    list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
        list_for_each_entry_safe(dc, t, &c->cached_devs, list)
            if (dc->bdev == bdev)
            if (dc->bdev->bd_dev == dev)
                return true;
    list_for_each_entry_safe(dc, t, &uncached_devices, list)
        if (dc->bdev == bdev)
        if (dc->bdev->bd_dev == dev)
            return true;
    return false;
}

static bool bch_is_open_cache(struct block_device *bdev)
static bool bch_is_open_cache(dev_t dev)
{
    struct cache_set *c, *tc;

    list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
        struct cache *ca = c->cache;

        if (ca->bdev == bdev)
        if (ca->bdev->bd_dev == dev)
            return true;
    }

    return false;
}

static bool bch_is_open(struct block_device *bdev)
static bool bch_is_open(dev_t dev)
{
    return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
    return bch_is_open_cache(dev) || bch_is_open_backing(dev);
}

struct async_reg_args {

@@ -2538,9 +2535,11 @@ static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
                  sb);
    if (IS_ERR(bdev)) {
        if (bdev == ERR_PTR(-EBUSY)) {
            bdev = lookup_bdev(strim(path));
            dev_t dev;

            mutex_lock(&bch_register_lock);
            if (!IS_ERR(bdev) && bch_is_open(bdev))
            if (lookup_bdev(strim(path), &dev) == 0 &&
                bch_is_open(dev))
                err = "device already registered";
            else
                err = "device busy";
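As the bcache and dm-table hunks show, lookup_bdev() no longer hands back a referenced struct block_device; it returns an errno and fills in a dev_t. Sketch of a caller in the new style (demo_is_open() is a hypothetical stand-in for a check like bch_is_open()):

static bool demo_is_open(dev_t dev)
{
    return false;   /* placeholder for a real in-use check */
}

static bool demo_path_in_use(const char *path)
{
    dev_t dev;

    if (lookup_bdev(path, &dev))
        return false;       /* no such block device */
    return demo_is_open(dev);
}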
drivers/md/dm-core.h

@@ -96,19 +96,12 @@ struct mapped_device {
     */
    struct workqueue_struct *wq;

    /*
     * freeze/thaw support require holding onto a super block
     */
    struct super_block *frozen_sb;

    /* forced geometry settings */
    struct hd_geometry geometry;

    /* kobject and completion */
    struct dm_kobject_holder kobj_holder;

    struct block_device *bdev;

    struct dm_stats stats;

    /* for blk-mq request-based DM support */

drivers/md/dm-raid.c

@@ -700,8 +700,7 @@ static void rs_set_capacity(struct raid_set *rs)
{
    struct gendisk *gendisk = dm_disk(dm_table_get_md(rs->ti->table));

    set_capacity(gendisk, rs->md.array_sectors);
    revalidate_disk_size(gendisk, true);
    set_capacity_and_notify(gendisk, rs->md.array_sectors);
}

/*

drivers/md/dm-rq.c

@@ -397,7 +397,7 @@ static int map_request(struct dm_rq_target_io *tio)
    }

    /* The target has remapped the I/O so dispatch it */
    trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
    trace_block_rq_remap(clone, disk_devt(dm_disk(md)),
                 blk_rq_pos(rq));
    ret = dm_dispatch_clone_request(clone, rq);
    if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {

drivers/md/dm-table.c

@@ -347,16 +347,9 @@ static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
dev_t dm_get_dev_t(const char *path)
{
    dev_t dev;
    struct block_device *bdev;

    bdev = lookup_bdev(path);
    if (IS_ERR(bdev))
    if (lookup_bdev(path, &dev))
        dev = name_to_dev_t(path);
    else {
        dev = bdev->bd_dev;
        bdput(bdev);
    }

    return dev;
}
EXPORT_SYMBOL_GPL(dm_get_dev_t);

drivers/md/dm.c

@@ -570,7 +570,10 @@ static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
        }
    }

    r = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
    if (!bdev->bd_disk->fops->ioctl)
        r = -ENOTTY;
    else
        r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
out:
    dm_unprepare_ioctl(md, srcu_idx);
    return r;

@@ -1274,8 +1277,7 @@ static blk_qc_t __map_bio(struct dm_target_io *tio)
        break;
    case DM_MAPIO_REMAPPED:
        /* the bio has been remapped so dispatch it */
        trace_block_bio_remap(clone->bi_disk->queue, clone,
                      bio_dev(io->orig_bio), sector);
        trace_block_bio_remap(clone, bio_dev(io->orig_bio), sector);
        ret = submit_bio_noacct(clone);
        break;
    case DM_MAPIO_KILL:

@@ -1420,18 +1422,12 @@ static int __send_empty_flush(struct clone_info *ci)
     */
    bio_init(&flush_bio, NULL, 0);
    flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
    flush_bio.bi_disk = ci->io->md->disk;
    bio_associate_blkg(&flush_bio);

    ci->bio = &flush_bio;
    ci->sector_count = 0;

    /*
     * Empty flush uses a statically initialized bio, as the base for
     * cloning. However, blkg association requires that a bdev is
     * associated with a gendisk, which doesn't happen until the bdev is
     * opened. So, blkg association is done at issue time of the flush
     * rather than when the device is created in alloc_dev().
     */
    bio_set_dev(ci->bio, ci->io->md->bdev);

    BUG_ON(bio_has_data(ci->bio));
    while ((ti = dm_table_get_target(ci->map, target_nr++)))
        __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

@@ -1611,12 +1607,12 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
             * (by eliminating DM's splitting and just using bio_split)
             */
            part_stat_lock();
            __dm_part_stat_sub(&dm_disk(md)->part0,
            __dm_part_stat_sub(dm_disk(md)->part0,
                       sectors[op_stat_group(bio_op(bio))], ci.sector_count);
            part_stat_unlock();

            bio_chain(b, bio);
            trace_block_split(md->queue, b, bio->bi_iter.bi_sector);
            trace_block_split(b, bio->bi_iter.bi_sector);
            ret = submit_bio_noacct(bio);
            break;
        }

@@ -1748,11 +1744,6 @@ static void cleanup_mapped_device(struct mapped_device *md)

    cleanup_srcu_struct(&md->io_barrier);

    if (md->bdev) {
        bdput(md->bdev);
        md->bdev = NULL;
    }

    mutex_destroy(&md->suspend_lock);
    mutex_destroy(&md->type_lock);
    mutex_destroy(&md->table_devices_lock);

@@ -1844,10 +1835,6 @@ static struct mapped_device *alloc_dev(int minor)
    if (!md->wq)
        goto bad;

    md->bdev = bdget_disk(md->disk, 0);
    if (!md->bdev)
        goto bad;

    dm_stats_init(&md->stats);

    /* Populate the mapping, nobody knows we exist yet */

@@ -1972,8 +1959,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
    if (size != dm_get_size(md))
        memset(&md->geometry, 0, sizeof(md->geometry));

    set_capacity(md->disk, size);
    bd_set_nr_sectors(md->bdev, size);
    set_capacity_and_notify(md->disk, size);

    dm_table_event_callback(t, event_callback, md);

@@ -2256,7 +2242,7 @@ EXPORT_SYMBOL_GPL(dm_put);
static bool md_in_flight_bios(struct mapped_device *md)
{
    int cpu;
    struct hd_struct *part = &dm_disk(md)->part0;
    struct block_device *part = dm_disk(md)->part0;
    long sum = 0;

    for_each_possible_cpu(cpu) {

@@ -2391,27 +2377,19 @@ static int lock_fs(struct mapped_device *md)
{
    int r;

    WARN_ON(md->frozen_sb);
    WARN_ON(test_bit(DMF_FROZEN, &md->flags));

    md->frozen_sb = freeze_bdev(md->bdev);
    if (IS_ERR(md->frozen_sb)) {
        r = PTR_ERR(md->frozen_sb);
        md->frozen_sb = NULL;
        return r;
    }

    set_bit(DMF_FROZEN, &md->flags);

    return 0;
    r = freeze_bdev(md->disk->part0);
    if (!r)
        set_bit(DMF_FROZEN, &md->flags);
    return r;
}

static void unlock_fs(struct mapped_device *md)
{
    if (!test_bit(DMF_FROZEN, &md->flags))
        return;

    thaw_bdev(md->bdev, md->frozen_sb);
    md->frozen_sb = NULL;
    thaw_bdev(md->disk->part0);
    clear_bit(DMF_FROZEN, &md->flags);
}
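The dm lock_fs()/unlock_fs() rework above reflects the new freeze_bdev()/thaw_bdev() contract: the superblock is tracked internally and both calls return an errno, so callers keep only a flag. A hedged sketch of the caller shape (names illustrative):

#include <linux/blkdev.h>
#include <linux/bitops.h>

static int demo_freeze_fs(struct block_device *bdev, unsigned long *state)
{
    int r = freeze_bdev(bdev);

    if (!r)
        set_bit(0, state);  /* remember that we froze it */
    return r;
}

static void demo_thaw_fs(struct block_device *bdev, unsigned long *state)
{
    if (test_and_clear_bit(0, state))
        thaw_bdev(bdev);
}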
drivers/md/md-cluster.c

@@ -581,8 +581,7 @@ static int process_recvd_msg(struct mddev *mddev, struct cluster_msg *msg)
        process_metadata_update(mddev, msg);
        break;
    case CHANGE_CAPACITY:
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk_size(mddev->gendisk, true);
        set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
        break;
    case RESYNCING:
        set_bit(MD_RESYNCING_REMOTE, &mddev->recovery);

@@ -1296,13 +1295,10 @@ static void update_size(struct mddev *mddev, sector_t old_dev_sectors)
        if (ret)
            pr_err("%s:%d: failed to send CHANGE_CAPACITY msg\n",
                   __func__, __LINE__);
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk_size(mddev->gendisk, true);
        set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
    } else {
        /* revert to previous sectors */
        ret = mddev->pers->resize(mddev, old_dev_sectors);
        if (!ret)
            revalidate_disk_size(mddev->gendisk, true);
        ret = __sendmsg(cinfo, &cmsg);
        if (ret)
            pr_err("%s:%d: failed to send METADATA_UPDATED msg\n",
|
|||
"copied raid_disks doesn't match mddev->raid_disks");
|
||||
rcu_assign_pointer(mddev->private, newconf);
|
||||
md_set_array_sectors(mddev, linear_size(mddev, 0, 0));
|
||||
set_capacity(mddev->gendisk, mddev->array_sectors);
|
||||
set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
|
||||
mddev_resume(mddev);
|
||||
revalidate_disk_size(mddev->gendisk, true);
|
||||
kfree_rcu(oldconf, rcu);
|
||||
return 0;
|
||||
}
|
||||
|
@ -258,8 +257,7 @@ static bool linear_make_request(struct mddev *mddev, struct bio *bio)
|
|||
bio_endio(bio);
|
||||
} else {
|
||||
if (mddev->gendisk)
|
||||
trace_block_bio_remap(bio->bi_disk->queue,
|
||||
bio, disk_devt(mddev->gendisk),
|
||||
trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
|
||||
bio_sector);
|
||||
mddev_check_writesame(mddev, bio);
|
||||
mddev_check_write_zeroes(mddev, bio);
|
||||
|
|
115
drivers/md/md.c
115
drivers/md/md.c
|
@ -464,7 +464,7 @@ struct md_io {
|
|||
bio_end_io_t *orig_bi_end_io;
|
||||
void *orig_bi_private;
|
||||
unsigned long start_time;
|
||||
struct hd_struct *part;
|
||||
struct block_device *part;
|
||||
};
|
||||
|
||||
static void md_end_io(struct bio *bio)
|
||||
|
@ -2414,7 +2414,6 @@ EXPORT_SYMBOL(md_integrity_add_rdev);
|
|||
static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
|
||||
{
|
||||
char b[BDEVNAME_SIZE];
|
||||
struct kobject *ko;
|
||||
int err;
|
||||
|
||||
/* prevent duplicates */
|
||||
|
@ -2477,9 +2476,8 @@ static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev)
|
|||
if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
|
||||
goto fail;
|
||||
|
||||
ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
|
||||
/* failure here is OK */
|
||||
err = sysfs_create_link(&rdev->kobj, ko, "block");
|
||||
err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block");
|
||||
rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");
|
||||
rdev->sysfs_unack_badblocks =
|
||||
sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks");
|
||||
|
@ -5355,10 +5353,9 @@ array_size_store(struct mddev *mddev, const char *buf, size_t len)
|
|||
|
||||
if (!err) {
|
||||
mddev->array_sectors = sectors;
|
||||
if (mddev->pers) {
|
||||
set_capacity(mddev->gendisk, mddev->array_sectors);
|
||||
revalidate_disk_size(mddev->gendisk, true);
|
||||
}
|
||||
if (mddev->pers)
|
||||
set_capacity_and_notify(mddev->gendisk,
|
||||
mddev->array_sectors);
|
||||
}
|
||||
mddev_unlock(mddev);
|
||||
return err ?: len;
|
||||
|
@ -5765,11 +5762,12 @@ static int md_alloc(dev_t dev, char *name)
|
|||
return error;
|
||||
}
|
||||
|
||||
static struct kobject *md_probe(dev_t dev, int *part, void *data)
|
||||
static void md_probe(dev_t dev)
|
||||
{
|
||||
if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512)
|
||||
return;
|
||||
if (create_on_open)
|
||||
md_alloc(dev, NULL);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int add_named_array(const char *val, const struct kernel_param *kp)
|
||||
|
@ -6107,8 +6105,7 @@ int do_md_run(struct mddev *mddev)
|
|||
md_wakeup_thread(mddev->thread);
|
||||
md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
|
||||
|
||||
set_capacity(mddev->gendisk, mddev->array_sectors);
|
||||
revalidate_disk_size(mddev->gendisk, true);
|
||||
set_capacity_and_notify(mddev->gendisk, mddev->array_sectors);
|
||||
clear_bit(MD_NOT_READY, &mddev->flags);
|
||||
mddev->changed = 1;
|
||||
kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE);
|
||||
|
@ -6423,10 +6420,9 @@ static int do_md_stop(struct mddev *mddev, int mode,
|
|||
if (rdev->raid_disk >= 0)
|
||||
sysfs_unlink_rdev(mddev, rdev);
|
||||
|
||||
set_capacity(disk, 0);
|
||||
set_capacity_and_notify(disk, 0);
|
||||
mutex_unlock(&mddev->open_mutex);
|
||||
mddev->changed = 1;
|
||||
revalidate_disk_size(disk, true);
|
||||
|
||||
if (mddev->ro)
|
||||
mddev->ro = 0;
|
||||
|
@ -6535,7 +6531,7 @@ static void autorun_devices(int part)
|
|||
break;
|
||||
}
|
||||
|
||||
md_probe(dev, NULL, NULL);
|
||||
md_probe(dev);
|
||||
mddev = mddev_find(dev);
|
||||
if (!mddev || !mddev->gendisk) {
|
||||
if (mddev)
|
||||
|
@ -7257,8 +7253,8 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
|
|||
if (mddev_is_clustered(mddev))
|
||||
md_cluster_ops->update_size(mddev, old_dev_sectors);
|
||||
else if (mddev->queue) {
|
||||
set_capacity(mddev->gendisk, mddev->array_sectors);
|
||||
revalidate_disk_size(mddev->gendisk, true);
|
||||
set_capacity_and_notify(mddev->gendisk,
|
||||
mddev->array_sectors);
|
||||
}
|
||||
}
|
||||
return rv;
|
||||
|
@@ -7480,7 +7476,6 @@ static inline bool md_ioctl_valid(unsigned int cmd)
 {
 	switch (cmd) {
 	case ADD_NEW_DISK:
-	case BLKROSET:
 	case GET_ARRAY_INFO:
 	case GET_BITMAP_FILE:
 	case GET_DISK_INFO:
@@ -7507,7 +7502,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 	int err = 0;
 	void __user *argp = (void __user *)arg;
 	struct mddev *mddev = NULL;
-	int ro;
 	bool did_set_md_closing = false;
 
 	if (!md_ioctl_valid(cmd))
@@ -7687,35 +7681,6 @@ static int md_ioctl(struct block_device *bdev, fmode_t mode,
 			goto unlock;
 		}
 		break;
-
-	case BLKROSET:
-		if (get_user(ro, (int __user *)(arg))) {
-			err = -EFAULT;
-			goto unlock;
-		}
-		err = -EINVAL;
-
-		/* if the bdev is going readonly the value of mddev->ro
-		 * does not matter, no writes are coming
-		 */
-		if (ro)
-			goto unlock;
-
-		/* are we are already prepared for writes? */
-		if (mddev->ro != 1)
-			goto unlock;
-
-		/* transitioning to readauto need only happen for
-		 * arrays that call md_write_start
-		 */
-		if (mddev->pers) {
-			err = restart_array(mddev);
-			if (err == 0) {
-				mddev->ro = 2;
-				set_disk_ro(mddev->gendisk, 0);
-			}
-		}
-		goto unlock;
 	}
 
 	/*
@@ -7809,6 +7774,36 @@ static int md_compat_ioctl(struct block_device *bdev, fmode_t mode,
 }
 #endif /* CONFIG_COMPAT */
 
+static int md_set_read_only(struct block_device *bdev, bool ro)
+{
+	struct mddev *mddev = bdev->bd_disk->private_data;
+	int err;
+
+	err = mddev_lock(mddev);
+	if (err)
+		return err;
+
+	if (!mddev->raid_disks && !mddev->external) {
+		err = -ENODEV;
+		goto out_unlock;
+	}
+
+	/*
+	 * Transitioning to read-auto need only happen for arrays that call
+	 * md_write_start and which are not ready for writes yet.
+	 */
+	if (!ro && mddev->ro == 1 && mddev->pers) {
+		err = restart_array(mddev);
+		if (err)
+			goto out_unlock;
+		mddev->ro = 2;
+	}
+
+out_unlock:
+	mddev_unlock(mddev);
+	return err;
+}
+
 static int md_open(struct block_device *bdev, fmode_t mode)
 {
 	/*
@@ -7886,6 +7881,7 @@ const struct block_device_operations md_fops =
 #endif
 	.getgeo		= md_getgeo,
 	.check_events	= md_check_events,
+	.set_read_only	= md_set_read_only,
 };
 
 static int md_thread(void *arg)
@@ -8445,7 +8441,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
 	rcu_read_lock();
 	rdev_for_each_rcu(rdev, mddev) {
 		struct gendisk *disk = rdev->bdev->bd_disk;
-		curr_events = (int)part_stat_read_accum(&disk->part0, sectors) -
+		curr_events = (int)part_stat_read_accum(disk->part0, sectors) -
			      atomic_read(&disk->sync_io);
 		/* sync IO will cause sync_io to increase before the disk_stats
 		 * as sync_io is counted when a request starts, and
@@ -9015,10 +9011,9 @@ void md_do_sync(struct md_thread *thread)
 		mddev_lock_nointr(mddev);
 		md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0));
 		mddev_unlock(mddev);
-		if (!mddev_is_clustered(mddev)) {
-			set_capacity(mddev->gendisk, mddev->array_sectors);
-			revalidate_disk_size(mddev->gendisk, true);
-		}
+		if (!mddev_is_clustered(mddev))
+			set_capacity_and_notify(mddev->gendisk,
+						mddev->array_sectors);
 	}
 
 	spin_lock(&mddev->lock);
@@ -9547,18 +9542,15 @@ static int __init md_init(void)
 	if (!md_rdev_misc_wq)
 		goto err_rdev_misc_wq;
 
-	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
+	ret = __register_blkdev(MD_MAJOR, "md", md_probe);
+	if (ret < 0)
 		goto err_md;
 
-	if ((ret = register_blkdev(0, "mdp")) < 0)
+	ret = __register_blkdev(0, "mdp", md_probe);
+	if (ret < 0)
 		goto err_mdp;
 	mdp_major = ret;
 
-	blk_register_region(MKDEV(MD_MAJOR, 0), 512, THIS_MODULE,
-			    md_probe, NULL, NULL);
-	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
-			    md_probe, NULL, NULL);
-
 	register_reboot_notifier(&md_notifier);
 	raid_table_header = register_sysctl_table(raid_root_table);
 
@@ -9825,9 +9817,6 @@ static __exit void md_exit(void)
 	struct list_head *tmp;
 	int delay = 1;
 
-	blk_unregister_region(MKDEV(MD_MAJOR,0), 512);
-	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);
-
 	unregister_blkdev(MD_MAJOR,"md");
 	unregister_blkdev(mdp_major, "mdp");
 	unregister_reboot_notifier(&md_notifier);
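For illustration, a minimal sketch of how a driver might wire up the new ->set_read_only hook that replaces per-driver BLKROSET handling. All names here ("sketch_*") are hypothetical, not from this series; the block core now performs the capability check and copies the user flag before calling into the driver, and flips the policy bit itself when the hook returns 0.

    #include <linux/module.h>
    #include <linux/blkdev.h>

    struct sketch_dev {
    	bool hw_read_only;	/* hypothetical per-device state */
    };

    static int sketch_set_read_only(struct block_device *bdev, bool ro)
    {
    	struct sketch_dev *dev = bdev->bd_disk->private_data;

    	/* refuse clearing read-only if the hardware itself is RO */
    	if (!ro && dev->hw_read_only)
    		return -EROFS;
    	/* nothing else to do: the core updates the policy bit */
    	return 0;
    }

    static const struct block_device_operations sketch_fops = {
    	.owner		= THIS_MODULE,
    	.set_read_only	= sketch_set_read_only,
    };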
@@ -508,8 +508,8 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio)
 		bio_chain(discard_bio, bio);
 		bio_clone_blkg_association(discard_bio, bio);
 		if (mddev->gendisk)
-			trace_block_bio_remap(bdev_get_queue(rdev->bdev),
-				discard_bio, disk_devt(mddev->gendisk),
+			trace_block_bio_remap(discard_bio,
+				disk_devt(mddev->gendisk),
 				bio->bi_iter.bi_sector);
 		submit_bio_noacct(discard_bio);
 	}
@@ -581,8 +581,8 @@ static bool raid0_make_request(struct mddev *mddev, struct bio *bio)
 			tmp_dev->data_offset;
 
 	if (mddev->gendisk)
-		trace_block_bio_remap(bio->bi_disk->queue, bio,
-				disk_devt(mddev->gendisk), bio_sector);
+		trace_block_bio_remap(bio, disk_devt(mddev->gendisk),
+				bio_sector);
 	mddev_check_writesame(mddev, bio);
 	mddev_check_write_zeroes(mddev, bio);
 	submit_bio_noacct(bio);
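The same mechanical change repeats across raid1/raid10/raid5 and nvme below: the block tracepoints no longer take a request_queue argument, since the queue can be derived from the bio. A hedged sketch of the call-site pattern (the helper name is hypothetical):

    #include <linux/blkdev.h>
    #include <trace/events/block.h>

    /* remap a bio to a child device and record where it came from */
    static void sketch_remap_and_submit(struct bio *bio,
    				    struct gendisk *parent,
    				    sector_t orig_sector)
    {
    	/* old form: trace_block_bio_remap(bio->bi_disk->queue, bio, ...) */
    	trace_block_bio_remap(bio, disk_devt(parent), orig_sector);
    	submit_bio_noacct(bio);
    }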
@@ -1305,8 +1305,8 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
 	read_bio->bi_private = r1_bio;
 
 	if (mddev->gendisk)
-		trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
-				disk_devt(mddev->gendisk), r1_bio->sector);
+		trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
+				r1_bio->sector);
 
 	submit_bio_noacct(read_bio);
 }
@@ -1517,8 +1517,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 		atomic_inc(&r1_bio->remaining);
 
 		if (mddev->gendisk)
-			trace_block_bio_remap(mbio->bi_disk->queue,
-					      mbio, disk_devt(mddev->gendisk),
+			trace_block_bio_remap(mbio, disk_devt(mddev->gendisk),
 					      r1_bio->sector);
 		/* flush_pending_writes() needs access to the rdev so...*/
 		mbio->bi_disk = (void *)conf->mirrors[i].rdev;
@@ -1201,8 +1201,7 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio,
 	read_bio->bi_private = r10_bio;
 
 	if (mddev->gendisk)
-		trace_block_bio_remap(read_bio->bi_disk->queue,
-				      read_bio, disk_devt(mddev->gendisk),
+		trace_block_bio_remap(read_bio, disk_devt(mddev->gendisk),
 				      r10_bio->sector);
 	submit_bio_noacct(read_bio);
 	return;
@@ -1251,8 +1250,7 @@ static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
 	mbio->bi_private = r10_bio;
 
 	if (conf->mddev->gendisk)
-		trace_block_bio_remap(mbio->bi_disk->queue,
-				      mbio, disk_devt(conf->mddev->gendisk),
+		trace_block_bio_remap(mbio, disk_devt(conf->mddev->gendisk),
 				      r10_bio->sector);
 	/* flush_pending_writes() needs access to the rdev so...*/
 	mbio->bi_disk = (void *)rdev;
@@ -1222,9 +1222,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
 			if (conf->mddev->gendisk)
-				trace_block_bio_remap(bi->bi_disk->queue,
-						      bi, disk_devt(conf->mddev->gendisk),
-						      sh->dev[i].sector);
+				trace_block_bio_remap(bi,
+						disk_devt(conf->mddev->gendisk),
+						sh->dev[i].sector);
 			if (should_defer && op_is_write(op))
 				bio_list_add(&pending_bios, bi);
 			else
@@ -1272,9 +1272,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			if (op == REQ_OP_DISCARD)
 				rbi->bi_vcnt = 0;
 			if (conf->mddev->gendisk)
-				trace_block_bio_remap(rbi->bi_disk->queue,
-						      rbi, disk_devt(conf->mddev->gendisk),
-						      sh->dev[i].sector);
+				trace_block_bio_remap(rbi,
						disk_devt(conf->mddev->gendisk),
						sh->dev[i].sector);
 			if (should_defer && op_is_write(op))
 				bio_list_add(&pending_bios, rbi);
 			else
@@ -5468,8 +5468,7 @@ static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio)
 	spin_unlock_irq(&conf->device_lock);
 
 	if (mddev->gendisk)
-		trace_block_bio_remap(align_bi->bi_disk->queue,
-				      align_bi, disk_devt(mddev->gendisk),
+		trace_block_bio_remap(align_bi, disk_devt(mddev->gendisk),
 				      raid_bio->bi_iter.bi_sector);
 	submit_bio_noacct(align_bi);
 	return 1;
@@ -298,38 +298,10 @@ static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return ret;
 }
 
-static int blktrans_ioctl(struct block_device *bdev, fmode_t mode,
-			      unsigned int cmd, unsigned long arg)
-{
-	struct mtd_blktrans_dev *dev = blktrans_dev_get(bdev->bd_disk);
-	int ret = -ENXIO;
-
-	if (!dev)
-		return ret;
-
-	mutex_lock(&dev->lock);
-
-	if (!dev->mtd)
-		goto unlock;
-
-	switch (cmd) {
-	case BLKFLSBUF:
-		ret = dev->tr->flush ? dev->tr->flush(dev) : 0;
-		break;
-	default:
-		ret = -ENOTTY;
-	}
-unlock:
-	mutex_unlock(&dev->lock);
-	blktrans_dev_put(dev);
-	return ret;
-}
-
 static const struct block_device_operations mtd_block_ops = {
 	.owner		= THIS_MODULE,
 	.open		= blktrans_open,
 	.release	= blktrans_release,
-	.ioctl		= blktrans_ioctl,
 	.getgeo		= blktrans_getgeo,
 };
 
@@ -120,8 +120,8 @@ int get_tree_mtd(struct fs_context *fc,
 		       struct fs_context *fc))
 {
 #ifdef CONFIG_BLOCK
-	struct block_device *bdev;
-	int ret, major;
+	dev_t dev;
+	int ret;
 #endif
 	int mtdnr;
 
@@ -169,20 +169,15 @@ int get_tree_mtd(struct fs_context *fc,
 	/* try the old way - the hack where we allowed users to mount
 	 * /dev/mtdblock$(n) but didn't actually _use_ the blockdev
 	 */
-	bdev = lookup_bdev(fc->source);
-	if (IS_ERR(bdev)) {
-		ret = PTR_ERR(bdev);
+	ret = lookup_bdev(fc->source, &dev);
+	if (ret) {
 		errorf(fc, "MTD: Couldn't look up '%s': %d", fc->source, ret);
 		return ret;
 	}
 	pr_debug("MTDSB: lookup_bdev() returned 0\n");
 
-	major = MAJOR(bdev->bd_dev);
-	mtdnr = MINOR(bdev->bd_dev);
-	bdput(bdev);
-
-	if (major == MTD_BLOCK_MAJOR)
-		return mtd_get_sb_by_nr(fc, mtdnr, fill_super);
+	if (MAJOR(dev) == MTD_BLOCK_MAJOR)
+		return mtd_get_sb_by_nr(fc, MINOR(dev), fill_super);
 
 #endif /* CONFIG_BLOCK */
 
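lookup_bdev() changed shape in this series: it now returns an errno and hands back a dev_t through an output parameter, instead of returning a struct block_device the caller had to bdput(). A short sketch of the new calling convention (the wrapper name is hypothetical):

    #include <linux/kernel.h>
    #include <linux/blkdev.h>

    static int sketch_resolve_path(const char *path, dev_t *devt)
    {
    	int err = lookup_bdev(path, devt);	/* takes no reference */

    	if (err)
    		return err;
    	pr_info("%s is device %u:%u\n", path, MAJOR(*devt), MINOR(*devt));
    	return 0;
    }

Callers that only needed the device number (as both mtdsuper and btrfs above did) no longer create and drop a bdev reference just to read bd_dev.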
@@ -93,16 +93,6 @@ static void nvme_put_subsystem(struct nvme_subsystem *subsys);
 static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
 					   unsigned nsid);
 
-static void nvme_update_bdev_size(struct gendisk *disk)
-{
-	struct block_device *bdev = bdget_disk(disk, 0);
-
-	if (bdev) {
-		bd_set_nr_sectors(bdev, get_capacity(disk));
-		bdput(bdev);
-	}
-}
-
 /*
  * Prepare a queue for teardown.
  *
@@ -119,8 +109,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
 	blk_set_queue_dying(ns->queue);
 	blk_mq_unquiesce_queue(ns->queue);
 
-	set_capacity(ns->disk, 0);
-	nvme_update_bdev_size(ns->disk);
+	set_capacity_and_notify(ns->disk, 0);
 }
 
 static void nvme_queue_scan(struct nvme_ctrl *ctrl)
@@ -2053,7 +2042,7 @@ static void nvme_update_disk_info(struct gendisk *disk,
 		capacity = 0;
 	}
 
-	set_capacity_revalidate_and_notify(disk, capacity, false);
+	set_capacity_and_notify(disk, capacity);
 
 	nvme_config_discard(disk, ns);
 	nvme_config_write_zeroes(disk, ns);
@@ -2134,7 +2123,6 @@ static int nvme_update_ns_info(struct nvme_ns *ns, struct nvme_id_ns *id)
 		blk_stack_limits(&ns->head->disk->queue->limits,
 				 &ns->queue->limits, 0);
 		blk_queue_update_readahead(ns->head->disk->queue);
-		nvme_update_bdev_size(ns->head->disk);
 		blk_mq_unfreeze_queue(ns->head->disk->queue);
 	}
 #endif
@@ -3962,8 +3950,6 @@ static void nvme_validate_ns(struct nvme_ns *ns, struct nvme_ns_ids *ids)
 	 */
 	if (ret && ret != -ENOMEM && !(ret > 0 && !(ret & NVME_SC_DNR)))
 		nvme_ns_remove(ns);
-	else
-		revalidate_disk_size(ns->disk, true);
 }
 
 static void nvme_validate_or_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
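This is the capacity-update rework in miniature: the old three-step dance (set_capacity(), resize the bdev inode, revalidate_disk_size()) collapses into one helper that also emits a resize uevent when the size actually changes. A minimal sketch, assuming a driver that has just learned a new size in 512-byte sectors:

    #include <linux/genhd.h>

    static void sketch_resize(struct gendisk *disk, sector_t new_sectors)
    {
    	/*
    	 * Replaces set_capacity() + revalidate_disk_size() /
    	 * bd_set_nr_sectors(); notifies userspace as a side effect.
    	 */
    	set_capacity_and_notify(disk, new_sectors);
    }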
@@ -312,8 +312,7 @@ blk_qc_t nvme_ns_head_submit_bio(struct bio *bio)
 	if (likely(ns)) {
 		bio->bi_disk = ns->disk;
 		bio->bi_opf |= REQ_NVME_MPATH;
-		trace_block_bio_remap(bio->bi_disk->queue, bio,
-				      disk_devt(ns->head->disk),
+		trace_block_bio_remap(bio, disk_devt(ns->head->disk),
 				      bio->bi_iter.bi_sector);
 		ret = submit_bio_noacct(bio);
 	} else if (nvme_available_path(head)) {
@@ -89,12 +89,12 @@ static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req,
 	if (!ns->bdev)
 		goto out;
 
-	host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]);
-	data_units_read = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
-		sectors[READ]), 1000);
-	host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]);
-	data_units_written = DIV_ROUND_UP(part_stat_read(ns->bdev->bd_part,
-		sectors[WRITE]), 1000);
+	host_reads = part_stat_read(ns->bdev, ios[READ]);
+	data_units_read =
+		DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[READ]), 1000);
+	host_writes = part_stat_read(ns->bdev, ios[WRITE]);
+	data_units_written =
+		DIV_ROUND_UP(part_stat_read(ns->bdev, sectors[WRITE]), 1000);
 
 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
 	put_unaligned_le64(data_units_read, &slog->data_units_read[0]);
@@ -120,12 +120,12 @@ static u16 nvmet_get_smart_log_all(struct nvmet_req *req,
 		/* we don't have the right data for file backed ns */
 		if (!ns->bdev)
 			continue;
-		host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]);
+		host_reads += part_stat_read(ns->bdev, ios[READ]);
 		data_units_read += DIV_ROUND_UP(
-			part_stat_read(ns->bdev->bd_part, sectors[READ]), 1000);
-		host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]);
+			part_stat_read(ns->bdev, sectors[READ]), 1000);
+		host_writes += part_stat_read(ns->bdev, ios[WRITE]);
 		data_units_written += DIV_ROUND_UP(
-			part_stat_read(ns->bdev->bd_part, sectors[WRITE]), 1000);
+			part_stat_read(ns->bdev, sectors[WRITE]), 1000);
 	}
 
 	put_unaligned_le64(host_reads, &slog->host_reads[0]);
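These call sites show the payoff of merging struct hd_struct into struct block_device: the per-partition statistics now live on the bdev itself, so the ->bd_part indirection (and the NULL checks that ext4 and f2fs carried below) disappear. A hedged sketch of the new access pattern:

    #include <linux/part_stat.h>

    /* sectors written through this bdev (whole disk or partition) */
    static u64 sketch_sectors_written(struct block_device *bdev)
    {
    	return part_stat_read(bdev, sectors[STAT_WRITE]);
    }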
@@ -211,6 +211,8 @@ static int nvme_loop_init_request(struct blk_mq_tag_set *set,
 			(set == &ctrl->tag_set) ? hctx_idx + 1 : 0);
 }
 
+static struct lock_class_key loop_hctx_fq_lock_key;
+
 static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 		unsigned int hctx_idx)
 {
@@ -219,6 +221,14 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 
 	BUG_ON(hctx_idx >= ctrl->ctrl.queue_count);
 
+	/*
+	 * flush_end_io() can be called recursively for us, so use our own
+	 * lock class key for avoiding lockdep possible recursive locking,
+	 * then we can remove the dynamically allocated lock class for each
+	 * flush queue, that way may cause horrible boot delay.
+	 */
+	blk_mq_hctx_set_fq_lock_class(hctx, &loop_hctx_fq_lock_key);
+
 	hctx->driver_data = queue;
 	return 0;
 }
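The same pattern applies to any stacking driver whose completion path can re-enter the flush machinery: give every hctx's flush queue a single static lock class so lockdep does not report a false recursion. A minimal sketch under that assumption (the driver name is hypothetical):

    #include <linux/blk-mq.h>

    static struct lock_class_key sketch_fq_lock_key;

    static int sketch_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
    			    unsigned int hctx_idx)
    {
    	blk_mq_hctx_set_fq_lock_class(hctx, &sketch_fq_lock_key);
    	return 0;
    }

Using one static key is also what lets the core drop the dynamically allocated per-flush-queue lock classes that were slowing down boot.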
@@ -430,7 +430,7 @@ dasd_state_ready_to_online(struct dasd_device * device)
 {
 	struct gendisk *disk;
 	struct disk_part_iter piter;
-	struct hd_struct *part;
+	struct block_device *part;
 
 	device->state = DASD_STATE_ONLINE;
 	if (device->block) {
@@ -443,7 +443,7 @@ dasd_state_ready_to_online(struct dasd_device * device)
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
-			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+			kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
 		disk_part_iter_exit(&piter);
 	}
 	return 0;
@@ -457,7 +457,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
 	int rc;
 	struct gendisk *disk;
 	struct disk_part_iter piter;
-	struct hd_struct *part;
+	struct block_device *part;
 
 	if (device->discipline->online_to_ready) {
 		rc = device->discipline->online_to_ready(device);
@@ -470,7 +470,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
 		disk = device->block->bdev->bd_disk;
 		disk_part_iter_init(&piter, disk, DISK_PITER_INCL_PART0);
 		while ((part = disk_part_iter_next(&piter)))
-			kobject_uevent(&part_to_dev(part)->kobj, KOBJ_CHANGE);
+			kobject_uevent(bdev_kobj(part), KOBJ_CHANGE);
 		disk_part_iter_exit(&piter);
 	}
 	return 0;
@@ -3376,6 +3376,7 @@ dasd_device_operations = {
 	.ioctl		= dasd_ioctl,
 	.compat_ioctl	= dasd_ioctl,
 	.getgeo		= dasd_getgeo,
+	.set_read_only	= dasd_set_read_only,
 };
 
 /*******************************************************************************
@@ -834,7 +834,8 @@ int dasd_scan_partitions(struct dasd_block *);
 void dasd_destroy_partitions(struct dasd_block *);
 
 /* externals in dasd_ioctl.c */
 int dasd_ioctl(struct block_device *, fmode_t, unsigned int, unsigned long);
+int dasd_set_read_only(struct block_device *bdev, bool ro);
 
 /* externals in dasd_proc.c */
 int dasd_proc_init(void);
@@ -54,8 +54,6 @@ dasd_ioctl_enable(struct block_device *bdev)
 		return -ENODEV;
 
 	dasd_enable_device(base);
-	/* Formatting the dasd device can change the capacity. */
-	bd_set_nr_sectors(bdev, get_capacity(base->block->gdp));
 	dasd_put_device(base);
 	return 0;
 }
@@ -88,7 +86,7 @@ dasd_ioctl_disable(struct block_device *bdev)
 	 * Set i_size to zero, since read, write, etc. check against this
 	 * value.
 	 */
-	bd_set_nr_sectors(bdev, 0);
+	set_capacity(bdev->bd_disk, 0);
 	dasd_put_device(base);
 	return 0;
 }
@@ -222,9 +220,8 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata)
 	 * enabling the device later.
 	 */
 	if (fdata->start_unit == 0) {
-		struct block_device *bdev = bdget_disk(block->gdp, 0);
-		bdev->bd_inode->i_blkbits = blksize_bits(fdata->blksize);
-		bdput(bdev);
+		block->gdp->part0->bd_inode->i_blkbits =
+			blksize_bits(fdata->blksize);
 	}
 
 	rc = base->discipline->format_device(base, fdata, 1);
@@ -532,28 +529,22 @@ static int dasd_ioctl_information(struct dasd_block *block, void __user *argp,
 /*
  * Set read only
  */
-static int
-dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp)
+int dasd_set_read_only(struct block_device *bdev, bool ro)
 {
 	struct dasd_device *base;
-	int intval, rc;
+	int rc;
 
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
+	/* do not manipulate hardware state for partitions */
 	if (bdev_is_partition(bdev))
-		// ro setting is not allowed for partitions
-		return -EINVAL;
-	if (get_user(intval, (int __user *)argp))
-		return -EFAULT;
+		return 0;
 
 	base = dasd_device_from_gendisk(bdev->bd_disk);
 	if (!base)
 		return -ENODEV;
-	if (!intval && test_bit(DASD_FLAG_DEVICE_RO, &base->flags)) {
-		dasd_put_device(base);
-		return -EROFS;
-	}
-	set_disk_ro(bdev->bd_disk, intval);
-	rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, intval);
+	if (!ro && test_bit(DASD_FLAG_DEVICE_RO, &base->flags))
+		rc = -EROFS;
+	else
+		rc = dasd_set_feature(base->cdev, DASD_FEATURE_READONLY, ro);
 	dasd_put_device(base);
 	return rc;
 }
@@ -633,9 +624,6 @@ int dasd_ioctl(struct block_device *bdev, fmode_t mode,
 	case BIODASDPRRST:
 		rc = dasd_ioctl_reset_profile(block);
 		break;
-	case BLKROSET:
-		rc = dasd_ioctl_set_ro(bdev, argp);
-		break;
 	case DASDAPIVER:
 		rc = dasd_ioctl_api_version(argp);
 		break;
@@ -2359,8 +2359,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 		}
 	}
 
-	blk_add_driver_data(scsi->request->q, scsi->request, &blktrc,
-			    sizeof(blktrc));
+	blk_add_driver_data(scsi->request, &blktrc, sizeof(blktrc));
 }
 
 /**
@@ -32,7 +32,7 @@
  */
 unsigned char *scsi_bios_ptable(struct block_device *dev)
 {
-	struct address_space *mapping = dev->bd_contains->bd_inode->i_mapping;
+	struct address_space *mapping = bdev_whole(dev)->bd_inode->i_mapping;
 	unsigned char *res = NULL;
 	struct page *page;
 
@@ -630,13 +630,11 @@ static struct scsi_driver sd_template = {
 };
 
 /*
- * Dummy kobj_map->probe function.
- * The default ->probe function will call modprobe, which is
- * pointless as this module is already loaded.
+ * Don't request a new module, as that could deadlock in multipath
+ * environment.
  */
-static struct kobject *sd_default_probe(dev_t devt, int *partno, void *data)
+static void sd_default_probe(dev_t devt)
 {
-	return NULL;
 }
 
 /*
@@ -1750,10 +1748,8 @@ static int sd_sync_cache(struct scsi_disk *sdkp, struct scsi_sense_hdr *sshdr)
 static void sd_rescan(struct device *dev)
 {
 	struct scsi_disk *sdkp = dev_get_drvdata(dev);
-	int ret;
 
-	ret = sd_revalidate_disk(sdkp->disk);
-	revalidate_disk_size(sdkp->disk, ret == 0);
+	sd_revalidate_disk(sdkp->disk);
 }
 
 static int sd_ioctl(struct block_device *bdev, fmode_t mode,
@@ -3265,8 +3261,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 
 	sdkp->first_scan = 0;
 
-	set_capacity_revalidate_and_notify(disk,
-		logical_to_sectors(sdp, sdkp->capacity), false);
+	set_capacity_and_notify(disk, logical_to_sectors(sdp, sdkp->capacity));
 	sd_config_write_same(sdkp);
 	kfree(buffer);
 
@@ -3276,7 +3271,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
 	 * capacity to 0.
 	 */
 	if (sd_zbc_revalidate_zones(sdkp))
-		set_capacity_revalidate_and_notify(disk, 0, false);
+		set_capacity_and_notify(disk, 0);
 
  out:
 	return 0;
@@ -3528,9 +3523,6 @@ static int sd_remove(struct device *dev)
 
 	free_opal_dev(sdkp->opal_dev);
 
-	blk_register_region(devt, SD_MINORS, NULL,
-			    sd_default_probe, NULL, NULL);
-
 	mutex_lock(&sd_ref_mutex);
 	dev_set_drvdata(dev, NULL);
 	put_device(&sdkp->dev);
@@ -3720,11 +3712,9 @@ static int __init init_sd(void)
 	SCSI_LOG_HLQUEUE(3, printk("init_sd: sd driver entry point\n"));
 
 	for (i = 0; i < SD_MAJORS; i++) {
-		if (register_blkdev(sd_major(i), "sd") != 0)
+		if (__register_blkdev(sd_major(i), "sd", sd_default_probe))
 			continue;
 		majors++;
-		blk_register_region(sd_major(i), SD_MINORS, NULL,
-				    sd_default_probe, NULL, NULL);
 	}
 
 	if (!majors)
@@ -3797,10 +3787,8 @@ static void __exit exit_sd(void)
 
 	class_unregister(&sd_disk_class);
 
-	for (i = 0; i < SD_MAJORS; i++) {
-		blk_unregister_region(sd_major(i), SD_MINORS);
+	for (i = 0; i < SD_MAJORS; i++)
 		unregister_blkdev(sd_major(i), "sd");
-	}
 }
 
 module_init(init_sd);
@@ -133,10 +133,10 @@ static int fd_configure_device(struct se_device *dev)
 	 */
 	inode = file->f_mapping->host;
 	if (S_ISBLK(inode->i_mode)) {
-		struct request_queue *q = bdev_get_queue(inode->i_bdev);
+		struct request_queue *q = bdev_get_queue(I_BDEV(inode));
 		unsigned long long dev_size;
 
-		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
+		fd_dev->fd_block_size = bdev_logical_block_size(I_BDEV(inode));
 		/*
 		 * Determine the number of bytes from i_size_read() minus
 		 * one (1) logical sector from underlying struct block_device
@@ -559,7 +559,7 @@ fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
 
 	if (S_ISBLK(inode->i_mode)) {
 		/* The backend is block device, use discard */
-		struct block_device *bdev = inode->i_bdev;
+		struct block_device *bdev = I_BDEV(inode);
 		struct se_device *dev = cmd->se_dev;
 
 		ret = blkdev_issue_discard(bdev,
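With inode->i_bdev removed from struct inode (see the fs/inode.c hunk below), the block device behind an open file is recovered from the mapping host, which is the bdevfs inode itself. A hedged sketch of the replacement idiom:

    #include <linux/fs.h>
    #include <linux/blkdev.h>

    static struct block_device *sketch_file_bdev(struct file *file)
    {
    	struct inode *inode = file->f_mapping->host;

    	if (!S_ISBLK(inode->i_mode))
    		return NULL;
    	return I_BDEV(inode);	/* container_of on the bdev inode */
    }

Note the S_ISBLK() check replaces the old "is i_bdev non-NULL" test, as in the io_uring and usb-gadget hunks below.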
@@ -1029,9 +1029,8 @@ static sector_t pscsi_get_blocks(struct se_device *dev)
 {
 	struct pscsi_dev_virt *pdv = PSCSI_DEV(dev);
 
-	if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
-		return pdv->pdv_bd->bd_part->nr_sects;
-
+	if (pdv->pdv_bd)
+		return bdev_nr_sectors(pdv->pdv_bd);
 	return 0;
 }
 
@@ -204,7 +204,7 @@ int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
 	if (!(filp->f_mode & FMODE_WRITE))
 		ro = 1;
 
-	inode = file_inode(filp);
+	inode = filp->f_mapping->host;
 	if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
 		LINFO(curlun, "invalid file type: %s\n", filename);
 		goto out;
@@ -221,7 +221,7 @@ int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
 	if (!(filp->f_mode & FMODE_CAN_WRITE))
 		ro = 1;
 
-	size = i_size_read(inode->i_mapping->host);
+	size = i_size_read(inode);
 	if (size < 0) {
 		LINFO(curlun, "unable to find file size: %s\n", filename);
 		rc = (int) size;
@@ -231,8 +231,8 @@ int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
 	if (curlun->cdrom) {
 		blksize = 2048;
 		blkbits = 11;
-	} else if (inode->i_bdev) {
-		blksize = bdev_logical_block_size(inode->i_bdev);
+	} else if (S_ISBLK(inode->i_mode)) {
+		blksize = bdev_logical_block_size(I_BDEV(inode));
 		blkbits = blksize_bits(blksize);
 	} else {
 		blksize = 512;
fs/block_dev.c — 799 changes (diff suppressed because it is too large)
@@ -1343,8 +1343,6 @@ int btrfs_sysfs_add_space_info_type(struct btrfs_fs_info *fs_info,
 
 void btrfs_sysfs_remove_device(struct btrfs_device *device)
 {
-	struct hd_struct *disk;
-	struct kobject *disk_kobj;
 	struct kobject *devices_kobj;
 
 	/*
@@ -1354,11 +1352,8 @@ void btrfs_sysfs_remove_device(struct btrfs_device *device)
 	devices_kobj = device->fs_info->fs_devices->devices_kobj;
 	ASSERT(devices_kobj);
 
-	if (device->bdev) {
-		disk = device->bdev->bd_part;
-		disk_kobj = &part_to_dev(disk)->kobj;
-		sysfs_remove_link(devices_kobj, disk_kobj->name);
-	}
+	if (device->bdev)
+		sysfs_remove_link(devices_kobj, bdev_kobj(device->bdev)->name);
 
 	if (device->devid_kobj.state_initialized) {
 		kobject_del(&device->devid_kobj);
@@ -1464,11 +1459,7 @@ int btrfs_sysfs_add_device(struct btrfs_device *device)
 	nofs_flag = memalloc_nofs_save();
 
 	if (device->bdev) {
-		struct hd_struct *disk;
-		struct kobject *disk_kobj;
-
-		disk = device->bdev->bd_part;
-		disk_kobj = &part_to_dev(disk)->kobj;
+		struct kobject *disk_kobj = bdev_kobj(device->bdev);
 
 		ret = sysfs_create_link(devices_kobj, disk_kobj, disk_kobj->name);
 		if (ret) {
@@ -935,16 +935,16 @@ static noinline struct btrfs_device *device_list_add(const char *path,
 	 * make sure it's the same device if the device is mounted
 	 */
 	if (device->bdev) {
-		struct block_device *path_bdev;
+		int error;
+		dev_t path_dev;
 
-		path_bdev = lookup_bdev(path);
-		if (IS_ERR(path_bdev)) {
+		error = lookup_bdev(path, &path_dev);
+		if (error) {
 			mutex_unlock(&fs_devices->device_list_mutex);
-			return ERR_CAST(path_bdev);
+			return ERR_PTR(error);
 		}
 
-		if (device->bdev != path_bdev) {
-			bdput(path_bdev);
+		if (device->bdev->bd_dev != path_dev) {
 			mutex_unlock(&fs_devices->device_list_mutex);
 			/*
 			 * device->fs_info may not be reliable here, so
@@ -959,7 +959,6 @@ static noinline struct btrfs_device *device_list_add(const char *path,
 					task_pid_nr(current));
 			return ERR_PTR(-EEXIST);
 		}
-		bdput(path_bdev);
 		btrfs_info_in_rcu(device->fs_info,
 	"devid %llu device path %s changed to %s scanned by %s (%d)",
 			  devid, rcu_str_deref(device->name),
@@ -165,7 +165,7 @@ int btrfs_get_dev_zone_info(struct btrfs_device *device)
 	if (!zone_info)
 		return -ENOMEM;
 
-	nr_sectors = bdev->bd_part->nr_sects;
+	nr_sectors = bdev_nr_sectors(bdev);
 	zone_sectors = bdev_zone_sectors(bdev);
 	/* Check if it's power of 2 (see is_power_of_2) */
 	ASSERT(zone_sectors != 0 && (zone_sectors & (zone_sectors - 1)) == 0);
@@ -505,7 +505,7 @@ int btrfs_sb_log_location_bdev(struct block_device *bdev, int mirror, int rw,
 		return -EINVAL;
 	zone_size = zone_sectors << SECTOR_SHIFT;
 	zone_sectors_shift = ilog2(zone_sectors);
-	nr_sectors = bdev->bd_part->nr_sects;
+	nr_sectors = bdev_nr_sectors(bdev);
 	nr_zones = nr_sectors >> zone_sectors_shift;
 
 	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
@@ -603,7 +603,7 @@ int btrfs_reset_sb_log_zones(struct block_device *bdev, int mirror)
 
 	zone_sectors = bdev_zone_sectors(bdev);
 	zone_sectors_shift = ilog2(zone_sectors);
-	nr_sectors = bdev->bd_part->nr_sects;
+	nr_sectors = bdev_nr_sectors(bdev);
 	nr_zones = nr_sectors >> zone_sectors_shift;
 
 	sb_zone = sb_zone_number(zone_sectors_shift + SECTOR_SHIFT, mirror);
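Direct ->bd_part->nr_sects reads are replaced by the bdev_nr_sectors() accessor throughout (here, in pscsi, f2fs and pstore). A hedged sketch of the helper in use; the wrapper and the -ENOSPC policy are illustrative only:

    #include <linux/errno.h>
    #include <linux/genhd.h>

    static int sketch_check_min_size(struct block_device *bdev, sector_t need)
    {
    	/* size in 512-byte sectors, derived from the bdev inode size */
    	if (bdev_nr_sectors(bdev) < need)
    		return -ENOSPC;
    	return 0;
    }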
@@ -523,7 +523,7 @@ static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
 
 void emergency_thaw_bdev(struct super_block *sb)
 {
-	while (sb->s_bdev && !thaw_bdev(sb->s_bdev, sb))
+	while (sb->s_bdev && !thaw_bdev(sb->s_bdev))
 		printk(KERN_WARNING "Emergency Thaw on %pg\n", sb->s_bdev);
 }
 
@@ -624,7 +624,7 @@ static int ext4_shutdown(struct super_block *sb, unsigned long arg)
 	case EXT4_GOING_FLAGS_DEFAULT:
 		freeze_bdev(sb->s_bdev);
 		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
-		thaw_bdev(sb->s_bdev, sb);
+		thaw_bdev(sb->s_bdev);
 		break;
 	case EXT4_GOING_FLAGS_LOGFLUSH:
 		set_bit(EXT4_FLAGS_SHUTDOWN, &sbi->s_ext4_flags);
@@ -4044,9 +4044,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	sbi->s_sb = sb;
 	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
 	sbi->s_sb_block = sb_block;
-	if (sb->s_bdev->bd_part)
-		sbi->s_sectors_written_start =
-			part_stat_read(sb->s_bdev->bd_part, sectors[STAT_WRITE]);
+	sbi->s_sectors_written_start =
+		part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
 
 	/* Cleanup superblock name */
 	strreplace(sb->s_id, '/', '!');
@@ -5505,15 +5504,10 @@ static int ext4_commit_super(struct super_block *sb, int sync)
 	 */
 	if (!(sb->s_flags & SB_RDONLY))
 		ext4_update_tstamp(es, s_wtime);
-	if (sb->s_bdev->bd_part)
-		es->s_kbytes_written =
-			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
-			    ((part_stat_read(sb->s_bdev->bd_part,
-					     sectors[STAT_WRITE]) -
-			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
-	else
-		es->s_kbytes_written =
-			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
+	es->s_kbytes_written =
+		cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
+		    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
+		      EXT4_SB(sb)->s_sectors_written_start) >> 1));
 	if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
 		ext4_free_blocks_count_set(es,
 			EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
@@ -62,11 +62,8 @@ static ssize_t session_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
 {
 	struct super_block *sb = sbi->s_buddy_cache->i_sb;
 
-	if (!sb->s_bdev->bd_part)
-		return snprintf(buf, PAGE_SIZE, "0\n");
 	return snprintf(buf, PAGE_SIZE, "%lu\n",
-			(part_stat_read(sb->s_bdev->bd_part,
-					sectors[STAT_WRITE]) -
+			(part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
 			 sbi->s_sectors_written_start) >> 1);
 }
 
@@ -74,12 +71,9 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_sb_info *sbi, char *buf)
 {
 	struct super_block *sb = sbi->s_buddy_cache->i_sb;
 
-	if (!sb->s_bdev->bd_part)
-		return snprintf(buf, PAGE_SIZE, "0\n");
 	return snprintf(buf, PAGE_SIZE, "%llu\n",
 			(unsigned long long)(sbi->s_kbytes_written +
-			((part_stat_read(sb->s_bdev->bd_part,
-					 sectors[STAT_WRITE]) -
+			((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
 			  EXT4_SB(sb)->s_sectors_written_start) >> 1)));
 }
 
@@ -1395,7 +1395,6 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	__u32 crc32 = 0;
 	int i;
 	int cp_payload_blks = __cp_payload(sbi);
-	struct super_block *sb = sbi->sb;
 	struct curseg_info *seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
 	u64 kbytes_written;
 	int err;
@@ -1489,9 +1488,7 @@ static int do_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc)
 	start_blk += data_sum_blocks;
 
 	/* Record write statistics in the hot node summary */
-	kbytes_written = sbi->kbytes_written;
-	if (sb->s_bdev->bd_part)
-		kbytes_written += BD_PART_WRITTEN(sbi);
+	kbytes_written = sbi->kbytes_written + BD_PART_WRITTEN(sbi);
 
 	seg_i->journal->info.kbytes_written = cpu_to_le64(kbytes_written);
 
@@ -1675,7 +1675,7 @@ static inline bool f2fs_is_multi_device(struct f2fs_sb_info *sbi)
  * and the return value is in kbytes. s is of struct f2fs_sb_info.
  */
 #define BD_PART_WRITTEN(s)						 \
-	(((u64)part_stat_read((s)->sb->s_bdev->bd_part, sectors[STAT_WRITE]) -   \
+	(((u64)part_stat_read((s)->sb->s_bdev, sectors[STAT_WRITE]) -   \
 		(s)->sectors_written_start) >> 1)
 
 static inline void f2fs_update_time(struct f2fs_sb_info *sbi, int type)
@@ -2230,16 +2230,12 @@ static int f2fs_ioc_shutdown(struct file *filp, unsigned long arg)
 
 	switch (in) {
 	case F2FS_GOING_DOWN_FULLSYNC:
-		sb = freeze_bdev(sb->s_bdev);
-		if (IS_ERR(sb)) {
-			ret = PTR_ERR(sb);
+		ret = freeze_bdev(sb->s_bdev);
+		if (ret)
 			goto out;
-		}
-		if (sb) {
-			f2fs_stop_checkpoint(sbi, false);
-			set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
-			thaw_bdev(sb->s_bdev, sb);
-		}
+		f2fs_stop_checkpoint(sbi, false);
+		set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
+		thaw_bdev(sb->s_bdev);
 		break;
 	case F2FS_GOING_DOWN_METASYNC:
 		/* do checkpoint only */
@@ -3151,7 +3151,7 @@ static int f2fs_report_zone_cb(struct blk_zone *zone, unsigned int idx,
 static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
 {
 	struct block_device *bdev = FDEV(devi).bdev;
-	sector_t nr_sectors = bdev->bd_part->nr_sects;
+	sector_t nr_sectors = bdev_nr_sectors(bdev);
 	struct f2fs_report_zones_args rep_zone_arg;
 	int ret;
 
@@ -3700,10 +3700,8 @@ static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
 	}
 
 	/* For write statistics */
-	if (sb->s_bdev->bd_part)
-		sbi->sectors_written_start =
-			(u64)part_stat_read(sb->s_bdev->bd_part,
-					    sectors[STAT_WRITE]);
+	sbi->sectors_written_start =
+		(u64)part_stat_read(sb->s_bdev, sectors[STAT_WRITE]);
 
 	/* Read accumulated write IO statistics if exists */
 	seg_i = CURSEG_I(sbi, CURSEG_HOT_NODE);
@@ -90,11 +90,6 @@ static ssize_t free_segments_show(struct f2fs_attr *a,
 static ssize_t lifetime_write_kbytes_show(struct f2fs_attr *a,
 		struct f2fs_sb_info *sbi, char *buf)
 {
-	struct super_block *sb = sbi->sb;
-
-	if (!sb->s_bdev->bd_part)
-		return sprintf(buf, "0\n");
-
 	return sprintf(buf, "%llu\n",
 			(unsigned long long)(sbi->kbytes_written +
 			BD_PART_WRITTEN(sbi)));
@@ -103,12 +98,8 @@ static ssize_t features_show(struct f2fs_attr *a,
 static ssize_t features_show(struct f2fs_attr *a,
 		struct f2fs_sb_info *sbi, char *buf)
 {
-	struct super_block *sb = sbi->sb;
 	int len = 0;
 
-	if (!sb->s_bdev->bd_part)
-		return sprintf(buf, "0\n");
-
 	if (f2fs_sb_has_encrypt(sbi))
 		len += scnprintf(buf, PAGE_SIZE - len, "%s",
 						"encryption");
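The f2fs and xfs shutdown paths illustrate the reworked freeze API: freeze_bdev() now returns an errno instead of a superblock pointer, and thaw_bdev() drops the sb argument because the frozen sb is remembered in bdev->bd_fsfreeze_sb. A minimal sketch of the new pairing (the wrapper is hypothetical):

    #include <linux/blkdev.h>

    static int sketch_quiesce_bdev(struct block_device *bdev)
    {
    	int err = freeze_bdev(bdev);

    	if (err)
    		return err;
    	/* ... operate on the frozen device here ... */
    	return thaw_bdev(bdev);
    }

This also removes the old tri-state return (NULL for "no filesystem", sb, or ERR_PTR) that callers like f2fs had to disentangle.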
@@ -155,7 +155,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	inode->i_bytes = 0;
 	inode->i_generation = 0;
 	inode->i_pipe = NULL;
-	inode->i_bdev = NULL;
 	inode->i_cdev = NULL;
 	inode->i_link = NULL;
 	inode->i_dir_seq = 0;
@@ -580,8 +579,6 @@ static void evict(struct inode *inode)
 		truncate_inode_pages_final(&inode->i_data);
 		clear_inode(inode);
 	}
-	if (S_ISBLK(inode->i_mode) && inode->i_bdev)
-		bd_forget(inode);
 	if (S_ISCHR(inode->i_mode) && inode->i_cdev)
 		cd_forget(inode);
 
@@ -25,7 +25,6 @@ extern void __init bdev_cache_init(void);
 extern int __sync_blockdev(struct block_device *bdev, int wait);
 void iterate_bdevs(void (*)(struct block_device *, void *), void *);
 void emergency_thaw_bdev(struct super_block *sb);
-void bd_forget(struct inode *inode);
 #else
 static inline void bdev_cache_init(void)
 {
@@ -43,9 +42,6 @@ static inline int emergency_thaw_bdev(struct super_block *sb)
 {
 	return 0;
 }
-static inline void bd_forget(struct inode *inode)
-{
-}
 #endif /* CONFIG_BLOCK */
 
 /*
@@ -116,7 +112,8 @@ extern struct file *alloc_empty_file_noaccount(int, const struct cred *);
 */
 extern int reconfigure_super(struct fs_context *);
 extern bool trylock_super(struct super_block *sb);
-extern struct super_block *user_get_super(dev_t);
+struct super_block *user_get_super(dev_t, bool excl);
+void put_super(struct super_block *sb);
 extern bool mount_capable(struct fs_context *);
 
 /*
@@ -2802,11 +2802,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
 
 static bool io_bdev_nowait(struct block_device *bdev)
 {
-#ifdef CONFIG_BLOCK
 	return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
-#else
-	return true;
-#endif
 }
 
 /*
@@ -2819,14 +2815,16 @@ static bool io_file_supports_async(struct file *file, int rw)
 	umode_t mode = file_inode(file)->i_mode;
 
 	if (S_ISBLK(mode)) {
-		if (io_bdev_nowait(file->f_inode->i_bdev))
+		if (IS_ENABLED(CONFIG_BLOCK) &&
+		    io_bdev_nowait(I_BDEV(file->f_mapping->host)))
 			return true;
 		return false;
 	}
 	if (S_ISCHR(mode) || S_ISSOCK(mode))
 		return true;
 	if (S_ISREG(mode)) {
-		if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
+		if (IS_ENABLED(CONFIG_BLOCK) &&
+		    io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
 		    file->f_op != &io_uring_fops)
 			return true;
 		return false;
@@ -1342,9 +1342,8 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
 }
 
 /*
- * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same
- * location, so checking ->i_pipe is not enough to verify that this is a
- * pipe.
+ * Note that i_pipe and i_cdev share the same location, so checking ->i_pipe is
+ * not enough to verify that this is a pipe.
  */
 struct pipe_inode_info *get_pipe_info(struct file *file, bool for_splice)
 {
@@ -244,7 +244,7 @@ static struct block_device *psblk_get_bdev(void *holder,
 		return bdev;
 	}
 
-	nr_sects = part_nr_sects_read(bdev->bd_part);
+	nr_sects = bdev_nr_sectors(bdev);
 	if (!nr_sects) {
 		pr_err("not enough space for '%s'\n", blkdev);
 		blkdev_put(bdev, mode);
@@ -20,6 +20,7 @@
 #include <linux/writeback.h>
 #include <linux/nospec.h>
 #include "compat.h"
+#include "../internal.h"
 
 static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
 				     qid_t id)
@@ -865,27 +866,42 @@ static bool quotactl_cmd_onoff(int cmd)
 static struct super_block *quotactl_block(const char __user *special, int cmd)
 {
 #ifdef CONFIG_BLOCK
-	struct block_device *bdev;
 	struct super_block *sb;
 	struct filename *tmp = getname(special);
+	bool excl = false, thawed = false;
+	int error;
+	dev_t dev;
 
 	if (IS_ERR(tmp))
 		return ERR_CAST(tmp);
-	bdev = lookup_bdev(tmp->name);
+	error = lookup_bdev(tmp->name, &dev);
 	putname(tmp);
-	if (IS_ERR(bdev))
-		return ERR_CAST(bdev);
-	if (quotactl_cmd_onoff(cmd))
-		sb = get_super_exclusive_thawed(bdev);
-	else if (quotactl_cmd_write(cmd))
-		sb = get_super_thawed(bdev);
-	else
-		sb = get_super(bdev);
-	bdput(bdev);
+	if (error)
+		return ERR_PTR(error);
+
+	if (quotactl_cmd_onoff(cmd)) {
+		excl = true;
+		thawed = true;
+	} else if (quotactl_cmd_write(cmd)) {
+		thawed = true;
+	}
+
+retry:
+	sb = user_get_super(dev, excl);
 	if (!sb)
 		return ERR_PTR(-ENODEV);
+
+	if (thawed && sb->s_writers.frozen != SB_UNFROZEN) {
+		if (excl)
+			up_write(&sb->s_umount);
+		else
+			up_read(&sb->s_umount);
+		wait_event(sb->s_writers.wait_unfrozen,
+			   sb->s_writers.frozen == SB_UNFROZEN);
+		put_super(sb);
+		goto retry;
+	}
 	return sb;
-
 #else
 	return ERR_PTR(-ENODEV);
 #endif
@@ -235,7 +235,7 @@ SYSCALL_DEFINE3(fstatfs64, unsigned int, fd, size_t, sz, struct statfs64 __user
 
 static int vfs_ustat(dev_t dev, struct kstatfs *sbuf)
 {
-	struct super_block *s = user_get_super(dev);
+	struct super_block *s = user_get_super(dev, false);
 	int err;
 	if (!s)
 		return -EINVAL;
fs/super.c — 93 changes

@@ -307,7 +307,7 @@ static void __put_super(struct super_block *s)
 * Drops a temporary reference, frees superblock if there's no
 * references left.
 */
-static void put_super(struct super_block *sb)
+void put_super(struct super_block *sb)
 {
 	spin_lock(&sb_lock);
 	__put_super(sb);
@@ -740,7 +740,14 @@ void iterate_supers_type(struct file_system_type *type,
 
 EXPORT_SYMBOL(iterate_supers_type);
 
-static struct super_block *__get_super(struct block_device *bdev, bool excl)
+/**
+ *	get_super - get the superblock of a device
+ *	@bdev: device to get the superblock for
+ *
+ *	Scans the superblock list and finds the superblock of the file system
+ *	mounted on the device given. %NULL is returned if no match is found.
+ */
+struct super_block *get_super(struct block_device *bdev)
 {
 	struct super_block *sb;
 
@@ -755,17 +762,11 @@ static struct super_block *__get_super(struct block_device *bdev, bool excl)
 		if (sb->s_bdev == bdev) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
-			if (!excl)
-				down_read(&sb->s_umount);
-			else
-				down_write(&sb->s_umount);
+			down_read(&sb->s_umount);
 			/* still alive? */
 			if (sb->s_root && (sb->s_flags & SB_BORN))
 				return sb;
-			if (!excl)
-				up_read(&sb->s_umount);
-			else
-				up_write(&sb->s_umount);
+			up_read(&sb->s_umount);
 			/* nope, got unmounted */
 			spin_lock(&sb_lock);
 			__put_super(sb);
@@ -776,66 +777,6 @@ struct super_block *get_super(struct block_device *bdev)
 	return NULL;
 }
 
-/**
- *	get_super - get the superblock of a device
- *	@bdev: device to get the superblock for
- *
- *	Scans the superblock list and finds the superblock of the file system
- *	mounted on the device given. %NULL is returned if no match is found.
- */
-struct super_block *get_super(struct block_device *bdev)
-{
-	return __get_super(bdev, false);
-}
 EXPORT_SYMBOL(get_super);
 
-static struct super_block *__get_super_thawed(struct block_device *bdev,
-					      bool excl)
-{
-	while (1) {
-		struct super_block *s = __get_super(bdev, excl);
-		if (!s || s->s_writers.frozen == SB_UNFROZEN)
-			return s;
-		if (!excl)
-			up_read(&s->s_umount);
-		else
-			up_write(&s->s_umount);
-		wait_event(s->s_writers.wait_unfrozen,
-			   s->s_writers.frozen == SB_UNFROZEN);
-		put_super(s);
-	}
-}
-
-/**
- *	get_super_thawed - get thawed superblock of a device
- *	@bdev: device to get the superblock for
- *
- *	Scans the superblock list and finds the superblock of the file system
- *	mounted on the device. The superblock is returned once it is thawed
- *	(or immediately if it was not frozen). %NULL is returned if no match
- *	is found.
- */
-struct super_block *get_super_thawed(struct block_device *bdev)
-{
-	return __get_super_thawed(bdev, false);
-}
-EXPORT_SYMBOL(get_super_thawed);
-
-/**
- *	get_super_exclusive_thawed - get thawed superblock of a device
- *	@bdev: device to get the superblock for
- *
- *	Scans the superblock list and finds the superblock of the file system
- *	mounted on the device. The superblock is returned once it is thawed
- *	(or immediately if it was not frozen) and s_umount semaphore is held
- *	in exclusive mode. %NULL is returned if no match is found.
- */
-struct super_block *get_super_exclusive_thawed(struct block_device *bdev)
-{
-	return __get_super_thawed(bdev, true);
-}
-EXPORT_SYMBOL(get_super_exclusive_thawed);
-
 /**
  * get_active_super - get an active reference to the superblock of a device
  * @bdev: device to get the superblock for
@@ -867,7 +808,7 @@ struct super_block *get_active_super(struct block_device *bdev)
 	return NULL;
 }
 
-struct super_block *user_get_super(dev_t dev)
+struct super_block *user_get_super(dev_t dev, bool excl)
 {
 	struct super_block *sb;
 
@@ -879,11 +820,17 @@ struct super_block *user_get_super(dev_t dev)
 		if (sb->s_dev == dev) {
 			sb->s_count++;
 			spin_unlock(&sb_lock);
-			down_read(&sb->s_umount);
+			if (excl)
+				down_write(&sb->s_umount);
+			else
+				down_read(&sb->s_umount);
 			/* still alive? */
 			if (sb->s_root && (sb->s_flags & SB_BORN))
 				return sb;
-			up_read(&sb->s_umount);
+			if (excl)
+				up_write(&sb->s_umount);
+			else
+				up_read(&sb->s_umount);
 			/* nope, got unmounted */
 			spin_lock(&sb_lock);
 			__put_super(sb);
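With get_super_thawed()/get_super_exclusive_thawed() gone, the one remaining caller open-codes the wait, as quotactl_block() above does. A hedged sketch of that pattern, assuming code living inside fs/ (user_get_super() and put_super() are declared in fs/internal.h, not in a public header):

    #include <linux/fs.h>
    #include <linux/wait.h>
    #include "internal.h"

    static struct super_block *sketch_get_unfrozen(dev_t dev, bool excl)
    {
    	struct super_block *sb;

    retry:
    	sb = user_get_super(dev, excl);
    	if (sb && sb->s_writers.frozen != SB_UNFROZEN) {
    		/* drop s_umount, wait for thaw, then look the sb up again */
    		if (excl)
    			up_write(&sb->s_umount);
    		else
    			up_read(&sb->s_umount);
    		wait_event(sb->s_writers.wait_unfrozen,
    			   sb->s_writers.frozen == SB_UNFROZEN);
    		put_super(sb);
    		goto retry;
    	}
    	return sb;
    }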
@@ -433,13 +433,10 @@ xfs_fs_goingdown(
 {
 	switch (inflags) {
 	case XFS_FSOP_GOING_FLAGS_DEFAULT: {
-		struct super_block *sb = freeze_bdev(mp->m_super->s_bdev);
-
-		if (sb && !IS_ERR(sb)) {
+		if (!freeze_bdev(mp->m_super->s_bdev)) {
 			xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
-			thaw_bdev(sb->s_bdev, sb);
+			thaw_bdev(mp->m_super->s_bdev);
 		}
-
 		break;
 	}
 	case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
@@ -148,11 +148,24 @@ static inline void bio_advance_iter(const struct bio *bio,
 	/* TODO: It is reasonable to complete bio with error here. */
 }
 
+/* @bytes should be less or equal to bvec[i->bi_idx].bv_len */
+static inline void bio_advance_iter_single(const struct bio *bio,
+					   struct bvec_iter *iter,
+					   unsigned int bytes)
+{
+	iter->bi_sector += bytes >> 9;
+
+	if (bio_no_advance_iter(bio))
+		iter->bi_size -= bytes;
+	else
+		bvec_iter_advance_single(bio->bi_io_vec, iter, bytes);
+}
+
 #define __bio_for_each_segment(bvl, bio, iter, start)			\
 	for (iter = (start);						\
 	     (iter).bi_size &&						\
 		((bvl = bio_iter_iovec((bio), (iter))), 1);		\
-	     bio_advance_iter((bio), &(iter), (bvl).bv_len))
+	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
 
 #define bio_for_each_segment(bvl, bio, iter)				\
 	__bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
@@ -161,7 +174,7 @@ static inline void bio_advance_iter(const struct bio *bio,
 	for (iter = (start);						\
 	     (iter).bi_size &&						\
 		((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \
-	     bio_advance_iter((bio), &(iter), (bvl).bv_len))
+	     bio_advance_iter_single((bio), &(iter), (bvl).bv_len))
 
 /* iterate over multi-page bvec */
 #define bio_for_each_bvec(bvl, bio, iter)				\
@@ -711,12 +724,6 @@ static inline bool bioset_initialized(struct bio_set *bs)
 	return bs->bio_slab != NULL;
 }
 
-/*
- * a small number of entries is fine, not going to be performance critical.
- * basically we just need to survive
- */
-#define BIO_SPLIT_ENTRIES 2
-
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
 #define bip_for_each_vec(bvl, bip, iter)	\
@@ -197,12 +197,12 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
 
 struct blkg_conf_ctx {
-	struct gendisk			*disk;
+	struct block_device		*bdev;
 	struct blkcg_gq			*blkg;
 	char				*body;
 };
 
-struct gendisk *blkcg_conf_get_disk(char **inputp);
+struct block_device *blkcg_conf_open_bdev(char **inputp);
 int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
 		   char *input, struct blkg_conf_ctx *ctx);
 void blkg_conf_finish(struct blkg_conf_ctx *ctx);
@@ -5,6 +5,7 @@
 #include <linux/blkdev.h>
 #include <linux/sbitmap.h>
 #include <linux/srcu.h>
+#include <linux/lockdep.h>
 
 struct blk_mq_tags;
 struct blk_flush_queue;
@@ -594,5 +595,7 @@ static inline void blk_mq_cleanup_rq(struct request *rq)
 }
 
 blk_qc_t blk_mq_submit_bio(struct bio *bio);
+void blk_mq_hctx_set_fq_lock_class(struct blk_mq_hw_ctx *hctx,
+		struct lock_class_key *key);
 
 #endif
@@ -8,6 +8,7 @@
 
 #include <linux/types.h>
 #include <linux/bvec.h>
+#include <linux/device.h>
 #include <linux/ktime.h>
 
 struct bio_set;
@@ -20,21 +21,25 @@ typedef void (bio_end_io_t) (struct bio *);
 struct bio_crypt_ctx;
 
 struct block_device {
+	sector_t		bd_start_sect;
+	struct disk_stats __percpu *bd_stats;
+	unsigned long		bd_stamp;
+	bool			bd_read_only;	/* read-only policy */
 	dev_t			bd_dev;
 	int			bd_openers;
 	struct inode *		bd_inode;	/* will die */
 	struct super_block *	bd_super;
 	struct mutex		bd_mutex;	/* open/close mutex */
 	void *			bd_claiming;
+	struct device		bd_device;
 	void *			bd_holder;
 	int			bd_holders;
 	bool			bd_write_holder;
 #ifdef CONFIG_SYSFS
 	struct list_head	bd_holder_disks;
 #endif
-	struct block_device *	bd_contains;
+	struct kobject		*bd_holder_dir;
 	u8			bd_partno;
-	struct hd_struct *	bd_part;
 	/* number of times partitions within this device have been opened. */
 	unsigned		bd_part_count;
 
@@ -46,8 +51,23 @@ struct block_device {
 	int			bd_fsfreeze_count;
 	/* Mutex for freeze */
 	struct mutex		bd_fsfreeze_mutex;
+	struct super_block	*bd_fsfreeze_sb;
+
+	struct partition_meta_info *bd_meta_info;
+#ifdef CONFIG_FAIL_MAKE_REQUEST
+	bool			bd_make_it_fail;
+#endif
 } __randomize_layout;
 
+#define bdev_whole(_bdev) \
+	((_bdev)->bd_disk->part0)
+
+#define dev_to_bdev(device) \
+	container_of((device), struct block_device, bd_device)
+
+#define bdev_kobj(_bdev) \
+	(&((_bdev)->bd_device.kobj))
+
 /*
  * Block error status values. See block/blk-core:blk_errors for the details.
  * Alpha cannot write a byte atomically, so we need to use 32-bit value.
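This is the heart of the series: hd_struct is gone and a partition is just another struct block_device with its own embedded struct device. The three new macros cover the common navigations; a hedged sketch of them in use (the printing wrapper is hypothetical):

    #include <linux/kernel.h>
    #include <linux/blk_types.h>
    #include <linux/genhd.h>

    static void sketch_describe(struct block_device *part)
    {
    	struct block_device *whole = bdev_whole(part);	/* disk->part0 */
    	struct kobject *kobj = bdev_kobj(part);		/* sysfs node */

    	pr_info("%s starts at sector %llu of %s\n", kobj->name,
    		(unsigned long long)part->bd_start_sect,
    		bdev_kobj(whole)->name);
    }

bdev_whole() replaces the old bd_contains pointer, bdev_kobj() replaces &part_to_dev(part)->kobj (as in the dasd and btrfs hunks above), and dev_to_bdev() is the inverse mapping used by the driver core callbacks.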
@@ -191,7 +191,7 @@ struct request {
 	};
 
 	struct gendisk *rq_disk;
-	struct hd_struct *part;
+	struct block_device *part;
 #ifdef CONFIG_BLK_RQ_ALLOC_TIME
 	/* Time that the first bio started allocating this request. */
 	u64 alloc_time_ns;
@@ -1491,7 +1491,7 @@ static inline int bdev_alignment_offset(struct block_device *bdev)
 		return -1;
 	if (bdev_is_partition(bdev))
 		return queue_limit_alignment_offset(&q->limits,
-				bdev->bd_part->start_sect);
+				bdev->bd_start_sect);
 	return q->limits.alignment_offset;
 }
 
@@ -1532,7 +1532,7 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
 
 	if (bdev_is_partition(bdev))
 		return queue_limit_discard_alignment(&q->limits,
-				bdev->bd_part->start_sect);
+				bdev->bd_start_sect);
 	return q->limits.discard_alignment;
 }
 
@@ -1853,6 +1853,7 @@ struct block_device_operations {
 	void (*unlock_native_capacity) (struct gendisk *);
 	int (*revalidate_disk) (struct gendisk *);
 	int (*getgeo)(struct block_device *, struct hd_geometry *);
+	int (*set_read_only)(struct block_device *bdev, bool ro);
 	/* this callback is with swap_lock and sometimes page table lock held */
 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
 	int (*report_zones)(struct gendisk *, sector_t sector,
@@ -1869,8 +1870,6 @@ extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
 #define blkdev_compat_ptr_ioctl NULL
 #endif
 
-extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
-				 unsigned long);
 extern int bdev_read_page(struct block_device *, sector_t, struct page *);
 extern int bdev_write_page(struct block_device *, sector_t, struct page *,
 						struct writeback_control *);
@@ -1947,9 +1946,9 @@ unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
 void disk_end_io_acct(struct gendisk *disk, unsigned int op,
 		unsigned long start_time);
 
-unsigned long part_start_io_acct(struct gendisk *disk, struct hd_struct **part,
-				 struct bio *bio);
-void part_end_io_acct(struct hd_struct *part, struct bio *bio,
+unsigned long part_start_io_acct(struct gendisk *disk,
+		struct block_device **part, struct bio *bio);
+void part_end_io_acct(struct block_device *part, struct bio *bio,
 		      unsigned long start_time);
 
 /**
@@ -1977,7 +1976,7 @@ int bdev_read_only(struct block_device *bdev);
 int set_blocksize(struct block_device *bdev, int size);
 
 const char *bdevname(struct block_device *bdev, char *buffer);
-struct block_device *lookup_bdev(const char *);
+int lookup_bdev(const char *pathname, dev_t *dev);
 
 void blkdev_show(struct seq_file *seqf, off_t offset);
 
@@ -1992,14 +1991,17 @@ void blkdev_show(struct seq_file *seqf, off_t offset);
 struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
 		void *holder);
 struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder);
-int bd_prepare_to_claim(struct block_device *bdev, struct block_device *whole,
-		void *holder);
-void bd_abort_claiming(struct block_device *bdev, struct block_device *whole,
-		void *holder);
+int bd_prepare_to_claim(struct block_device *bdev, void *holder);
+void bd_abort_claiming(struct block_device *bdev, void *holder);
 void blkdev_put(struct block_device *bdev, fmode_t mode);
 
+/* just for blk-cgroup, don't use elsewhere */
+struct block_device *blkdev_get_no_open(dev_t dev);
+void blkdev_put_no_open(struct block_device *bdev);
+
+struct block_device *bdev_alloc(struct gendisk *disk, u8 partno);
+void bdev_add(struct block_device *bdev, dev_t dev);
 struct block_device *I_BDEV(struct inode *inode);
-struct block_device *bdget_part(struct hd_struct *part);
 struct block_device *bdgrab(struct block_device *bdev);
 void bdput(struct block_device *);
 
@@ -2024,7 +2026,7 @@ static inline int sync_blockdev(struct block_device *bdev)
 #endif
 int fsync_bdev(struct block_device *bdev);
 
-struct super_block *freeze_bdev(struct block_device *bdev);
-int thaw_bdev(struct block_device *bdev, struct super_block *sb);
+int freeze_bdev(struct block_device *bdev);
+int thaw_bdev(struct block_device *bdev);
 
 #endif /* _LINUX_BLKDEV_H */
@@ -75,8 +75,7 @@ static inline bool blk_trace_note_message_enabled(struct request_queue *q)
 	return ret;
 }
 
-extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
-				void *data, size_t len);
+extern void blk_add_driver_data(struct request *rq, void *data, size_t len);
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 			   struct block_device *bdev,
 			   char __user *arg);
@@ -90,7 +89,7 @@ extern struct attribute_group blk_trace_attr_group;
 #else /* !CONFIG_BLK_DEV_IO_TRACE */
 # define blk_trace_ioctl(bdev, cmd, arg)		(-ENOTTY)
 # define blk_trace_shutdown(q)				do { } while (0)
-# define blk_add_driver_data(q, rq, data, len)		do {} while (0)
+# define blk_add_driver_data(rq, data, len)		do {} while (0)
 # define blk_trace_setup(q, name, dev, bdev, arg)	(-ENOTTY)
 # define blk_trace_startstop(q, start)			(-ENOTTY)
 # define blk_trace_remove(q)				(-ENOTTY)
|
|||
return true;
|
||||
}
|
||||
|
||||
static inline void bvec_iter_skip_zero_bvec(struct bvec_iter *iter)
|
||||
/*
|
||||
* A simpler version of bvec_iter_advance(), @bytes should not span
|
||||
* across multiple bvec entries, i.e. bytes <= bv[i->bi_idx].bv_len
|
||||
*/
|
||||
static inline void bvec_iter_advance_single(const struct bio_vec *bv,
|
||||
struct bvec_iter *iter, unsigned int bytes)
|
||||
{
|
||||
iter->bi_bvec_done = 0;
|
||||
iter->bi_idx++;
|
||||
unsigned int done = iter->bi_bvec_done + bytes;
|
||||
|
||||
if (done == bv[iter->bi_idx].bv_len) {
|
||||
done = 0;
|
||||
iter->bi_idx++;
|
||||
}
|
||||
iter->bi_bvec_done = done;
|
||||
iter->bi_size -= bytes;
|
||||
}
|
||||
|
||||
#define for_each_bvec(bvl, bio_vec, iter, start) \
|
||||
for (iter = (start); \
|
||||
(iter).bi_size && \
|
||||
((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \
|
||||
(bvl).bv_len ? (void)bvec_iter_advance((bio_vec), &(iter), \
|
||||
(bvl).bv_len) : bvec_iter_skip_zero_bvec(&(iter)))
|
||||
bvec_iter_advance_single((bio_vec), &(iter), (bvl).bv_len))
|
||||
|
||||
/* for iterating one bio from start to end */
|
||||
#define BVEC_ITER_ALL_INIT (struct bvec_iter) \
|
||||
|
|
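The rewritten iterators always advance by exactly one segment's bv_len, which is what lets bvec_iter_advance_single() skip the general multi-entry loop (and handle zero-length bvecs without a special case). A short sketch of the consumer side, which is unchanged for users of the standard macros; the counting helper is hypothetical:

    #include <linux/bio.h>

    static unsigned int sketch_count_bytes(struct bio *bio)
    {
    	struct bio_vec bv;
    	struct bvec_iter iter;
    	unsigned int bytes = 0;

    	/* each step advances by bv.bv_len, a single-entry step */
    	bio_for_each_segment(bv, bio, iter)
    		bytes += bv.bv_len;
    	return bytes;
    }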
[diff truncated: some files were not shown because too many files changed in this diff]