Merge branch 'for-2.6.29' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.29' of git://git.kernel.dk/linux-2.6-block: (43 commits)
  bio: get rid of bio_vec clearing
  bounce: don't rely on a zeroed bio_vec list
  cciss: simplify parameters to deregister_disk function
  cfq-iosched: fix race between exiting queue and exiting task
  loop: Do not call loop_unplug for not configured loop device.
  loop: Flush possible running bios when loop device is released.
  alpha: remove dead BIO_VMERGE_BOUNDARY
  Get rid of CONFIG_LSF
  block: make blk_softirq_init() static
  block: use min_not_zero in blk_queue_stack_limits
  block: add one-hit cache for disk partition lookup
  cfq-iosched: remove limit of dispatch depth of max 4 times quantum
  nbd: tell the block layer that it is not a rotational device
  block: get rid of elevator_t typedef
  aio: make the lookup_ioctx() lockless
  bio: add support for inlining a number of bio_vecs inside the bio
  bio: allow individual slabs in the bio_set
  bio: move the slab pointer inside the bio_set
  bio: only mempool back the largest bio_vec slab cache
  block: don't use plugging on SSD devices
  ...
This commit is contained in:
commit 1dff81f20c
@@ -914,7 +914,7 @@ I/O scheduler, a.k.a. elevator, is implemented in two layers.  Generic dispatch
 queue and specific I/O schedulers. Unless stated otherwise, elevator is used
 to refer to both parts and I/O scheduler to specific I/O schedulers.
 
-Block layer implements generic dispatch queue in ll_rw_blk.c and elevator.c.
+Block layer implements generic dispatch queue in block/*.c.
 The generic dispatch queue is responsible for properly ordering barrier
 requests, requeueing, handling non-fs requests and all other subtleties.
 
@@ -926,8 +926,8 @@ be built inside the kernel. Each queue can choose different one and can also
 change to another one dynamically.
 
 A block layer call to the i/o scheduler follows the convention elv_xxx(). This
-calls elevator_xxx_fn in the elevator switch (drivers/block/elevator.c). Oh,
-xxx and xxx might not match exactly, but use your imagination. If an elevator
+calls elevator_xxx_fn in the elevator switch (block/elevator.c). Oh, xxx
+and xxx might not match exactly, but use your imagination. If an elevator
 doesn't implement a function, the switch does nothing or some minimal house
 keeping work.
 
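The biodoc hunk above documents the elv_xxx() -> elevator_xxx_fn convention: the block layer calls a generic elv_*() wrapper, which dispatches through the active scheduler's ops table and does nothing (or minimal housekeeping) when the hook is absent. The standalone toy sketch below illustrates that dispatch pattern only; the struct and function names are simplified assumptions, not the kernel's API.

/* Toy model of the elv_xxx() -> elevator_xxx_fn dispatch described above. */
#include <stdio.h>

struct elevator_ops {
	void (*elevator_merged_fn)(int reqid);   /* optional scheduler hook */
};

static void demo_merged(int reqid)
{
	printf("scheduler saw merge of request %d\n", reqid);
}

/* elv_merged_request() analogue: call the hook only if it exists. */
static void elv_merged_request(const struct elevator_ops *ops, int reqid)
{
	if (ops->elevator_merged_fn)
		ops->elevator_merged_fn(reqid);
	/* otherwise: the switch does nothing beyond minimal housekeeping */
}

int main(void)
{
	struct elevator_ops with_hook = { .elevator_merged_fn = demo_merged };
	struct elevator_ops without_hook = { 0 };

	elv_merged_request(&with_hook, 1);     /* dispatches into the scheduler */
	elv_merged_request(&without_hook, 2);  /* silently falls through */
	return 0;
}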
@@ -96,9 +96,6 @@ static inline dma_addr_t __deprecated isa_page_to_bus(struct page *page)
 	return page_to_phys(page);
 }
 
-/* This depends on working iommu. */
-#define BIO_VMERGE_BOUNDARY	(alpha_mv.mv_pci_tbi ? PAGE_SIZE : 0)
-
 /* Maximum PIO space address supported? */
 #define IO_SPACE_LIMIT 0xffff
 
@@ -263,7 +263,7 @@ int s390_enable_sie(void)
 	/* lets check if we are allowed to replace the mm */
 	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+	    tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) {
 		task_unlock(tsk);
 		return -EINVAL;
 	}
@@ -279,7 +279,7 @@ int s390_enable_sie(void)
 	/* Now lets check again if something happened */
 	task_lock(tsk);
 	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
-	    tsk->mm != tsk->active_mm || tsk->mm->ioctx_list) {
+	    tsk->mm != tsk->active_mm || !hlist_empty(&tsk->mm->ioctx_list)) {
 		mmput(mm);
 		task_unlock(tsk);
 		return -EINVAL;
@@ -24,21 +24,17 @@ menuconfig BLOCK
 if BLOCK
 
 config LBD
-	bool "Support for Large Block Devices"
+	bool "Support for large block devices and files"
 	depends on !64BIT
 	help
-	  Enable block devices of size 2TB and larger.
+	  Enable block devices or files of size 2TB and larger.
 
 	  This option is required to support the full capacity of large
 	  (2TB+) block devices, including RAID, disk, Network Block Device,
 	  Logical Volume Manager (LVM) and loopback.
 
-	  For example, RAID devices are frequently bigger than the capacity
-	  of the largest individual hard drive.
-
-	  This option is not required if you have individual disk drives
-	  which total 2TB+ and you are not aggregating the capacity into
-	  a large block device (e.g. using RAID or LVM).
-
+	  This option also enables support for single files larger than
+	  2TB.
 
 	  If unsure, say N.
 
@@ -58,15 +54,6 @@ config BLK_DEV_IO_TRACE
 
 	  If unsure, say N.
 
-config LSF
-	bool "Support for Large Single Files"
-	depends on !64BIT
-	help
-	  Say Y here if you want to be able to handle very large files (2TB
-	  and larger), otherwise say N.
-
-	  If unsure, say Y.
-
 config BLK_DEV_BSG
 	bool "Block layer SG support v4 (EXPERIMENTAL)"
 	depends on EXPERIMENTAL
@@ -1339,12 +1339,12 @@ static int as_may_queue(struct request_queue *q, int rw)
 	return ret;
 }
 
-static void as_exit_queue(elevator_t *e)
+static void as_exit_queue(struct elevator_queue *e)
 {
 	struct as_data *ad = e->elevator_data;
 
 	del_timer_sync(&ad->antic_timer);
-	kblockd_flush_work(&ad->antic_work);
+	cancel_work_sync(&ad->antic_work);
 
 	BUG_ON(!list_empty(&ad->fifo_list[REQ_SYNC]));
 	BUG_ON(!list_empty(&ad->fifo_list[REQ_ASYNC]));
@@ -1409,7 +1409,7 @@ as_var_store(unsigned long *var, const char *page, size_t count)
 	return count;
 }
 
-static ssize_t est_time_show(elevator_t *e, char *page)
+static ssize_t est_time_show(struct elevator_queue *e, char *page)
 {
 	struct as_data *ad = e->elevator_data;
 	int pos = 0;
@@ -1427,7 +1427,7 @@ static ssize_t est_time_show(elevator_t *e, char *page)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR) \
-static ssize_t __FUNC(elevator_t *e, char *page) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
 { \
 	struct as_data *ad = e->elevator_data; \
 	return as_var_show(jiffies_to_msecs((__VAR)), (page)); \
@@ -1440,7 +1440,7 @@ SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
 { \
 	struct as_data *ad = e->elevator_data; \
 	int ret = as_var_store(__PTR, (page), count); \
@@ -24,8 +24,8 @@
 int blk_queue_ordered(struct request_queue *q, unsigned ordered,
 		      prepare_flush_fn *prepare_flush_fn)
 {
-	if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) &&
-	    prepare_flush_fn == NULL) {
+	if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH |
+					     QUEUE_ORDERED_DO_POSTFLUSH))) {
 		printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__);
 		return -EINVAL;
 	}
@@ -88,7 +88,7 @@ unsigned blk_ordered_req_seq(struct request *rq)
 		return QUEUE_ORDSEQ_DONE;
 }
 
-void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
+bool blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 {
 	struct request *rq;
 
@@ -99,7 +99,7 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 	q->ordseq |= seq;
 
 	if (blk_ordered_cur_seq(q) != QUEUE_ORDSEQ_DONE)
-		return;
+		return false;
 
 	/*
 	 * Okay, sequence complete.
@@ -109,6 +109,8 @@ void blk_ordered_complete_seq(struct request_queue *q, unsigned seq, int error)
 
 	if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq)))
 		BUG();
+
+	return true;
 }
 
 static void pre_flush_end_io(struct request *rq, int error)
@@ -134,7 +136,7 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	struct request *rq;
 	rq_end_io_fn *end_io;
 
-	if (which == QUEUE_ORDERED_PREFLUSH) {
+	if (which == QUEUE_ORDERED_DO_PREFLUSH) {
 		rq = &q->pre_flush_rq;
 		end_io = pre_flush_end_io;
 	} else {
@@ -151,80 +153,110 @@ static void queue_flush(struct request_queue *q, unsigned which)
 	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
 }
 
-static inline struct request *start_ordered(struct request_queue *q,
-					    struct request *rq)
+static inline bool start_ordered(struct request_queue *q, struct request **rqp)
 {
+	struct request *rq = *rqp;
+	unsigned skip = 0;
+
 	q->orderr = 0;
 	q->ordered = q->next_ordered;
 	q->ordseq |= QUEUE_ORDSEQ_STARTED;
 
 	/*
-	 * Prep proxy barrier request.
+	 * For an empty barrier, there's no actual BAR request, which
+	 * in turn makes POSTFLUSH unnecessary.  Mask them off.
 	 */
+	if (!rq->hard_nr_sectors) {
+		q->ordered &= ~(QUEUE_ORDERED_DO_BAR |
+				QUEUE_ORDERED_DO_POSTFLUSH);
+		/*
+		 * Empty barrier on a write-through device w/ ordered
+		 * tag has no command to issue and without any command
+		 * to issue, ordering by tag can't be used.  Drain
+		 * instead.
+		 */
+		if ((q->ordered & QUEUE_ORDERED_BY_TAG) &&
+		    !(q->ordered & QUEUE_ORDERED_DO_PREFLUSH)) {
+			q->ordered &= ~QUEUE_ORDERED_BY_TAG;
+			q->ordered |= QUEUE_ORDERED_BY_DRAIN;
+		}
+	}
+
+	/* stash away the original request */
 	elv_dequeue_request(q, rq);
 	q->orig_bar_rq = rq;
-	rq = &q->bar_rq;
-	blk_rq_init(q, rq);
-	if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
-		rq->cmd_flags |= REQ_RW;
-	if (q->ordered & QUEUE_ORDERED_FUA)
-		rq->cmd_flags |= REQ_FUA;
-	init_request_from_bio(rq, q->orig_bar_rq->bio);
-	rq->end_io = bar_end_io;
+	rq = NULL;
 
 	/*
 	 * Queue ordered sequence.  As we stack them at the head, we
 	 * need to queue in reverse order.  Note that we rely on that
 	 * no fs request uses ELEVATOR_INSERT_FRONT and thus no fs
-	 * request gets inbetween ordered sequence. If this request is
-	 * an empty barrier, we don't need to do a postflush ever since
-	 * there will be no data written between the pre and post flush.
-	 * Hence a single flush will suffice.
+	 * request gets inbetween ordered sequence.
 	 */
-	if ((q->ordered & QUEUE_ORDERED_POSTFLUSH) && !blk_empty_barrier(rq))
-		queue_flush(q, QUEUE_ORDERED_POSTFLUSH);
-	else
-		q->ordseq |= QUEUE_ORDSEQ_POSTFLUSH;
+	if (q->ordered & QUEUE_ORDERED_DO_POSTFLUSH) {
+		queue_flush(q, QUEUE_ORDERED_DO_POSTFLUSH);
+		rq = &q->post_flush_rq;
+	} else
+		skip |= QUEUE_ORDSEQ_POSTFLUSH;
 
-	elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	if (q->ordered & QUEUE_ORDERED_DO_BAR) {
+		rq = &q->bar_rq;
+
+		/* initialize proxy request and queue it */
+		blk_rq_init(q, rq);
+		if (bio_data_dir(q->orig_bar_rq->bio) == WRITE)
+			rq->cmd_flags |= REQ_RW;
+		if (q->ordered & QUEUE_ORDERED_DO_FUA)
+			rq->cmd_flags |= REQ_FUA;
+		init_request_from_bio(rq, q->orig_bar_rq->bio);
+		rq->end_io = bar_end_io;
+
+		elv_insert(q, rq, ELEVATOR_INSERT_FRONT);
+	} else
+		skip |= QUEUE_ORDSEQ_BAR;
 
-	if (q->ordered & QUEUE_ORDERED_PREFLUSH) {
-		queue_flush(q, QUEUE_ORDERED_PREFLUSH);
+	if (q->ordered & QUEUE_ORDERED_DO_PREFLUSH) {
+		queue_flush(q, QUEUE_ORDERED_DO_PREFLUSH);
 		rq = &q->pre_flush_rq;
 	} else
-		q->ordseq |= QUEUE_ORDSEQ_PREFLUSH;
+		skip |= QUEUE_ORDSEQ_PREFLUSH;
 
-	if ((q->ordered & QUEUE_ORDERED_TAG) || q->in_flight == 0)
-		q->ordseq |= QUEUE_ORDSEQ_DRAIN;
-	else
+	if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight)
 		rq = NULL;
+	else
+		skip |= QUEUE_ORDSEQ_DRAIN;
 
-	return rq;
+	*rqp = rq;
+
+	/*
+	 * Complete skipped sequences.  If whole sequence is complete,
+	 * return false to tell elevator that this request is gone.
+	 */
+	return !blk_ordered_complete_seq(q, skip, 0);
 }
 
-int blk_do_ordered(struct request_queue *q, struct request **rqp)
+bool blk_do_ordered(struct request_queue *q, struct request **rqp)
 {
 	struct request *rq = *rqp;
 	const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq);
 
 	if (!q->ordseq) {
 		if (!is_barrier)
-			return 1;
+			return true;
 
-		if (q->next_ordered != QUEUE_ORDERED_NONE) {
-			*rqp = start_ordered(q, rq);
-			return 1;
-		} else {
+		if (q->next_ordered != QUEUE_ORDERED_NONE)
+			return start_ordered(q, rqp);
+		else {
 			/*
-			 * This can happen when the queue switches to
-			 * ORDERED_NONE while this request is on it.
+			 * Queue ordering not supported.  Terminate
+			 * with prejudice.
 			 */
 			elv_dequeue_request(q, rq);
 			if (__blk_end_request(rq, -EOPNOTSUPP,
 					      blk_rq_bytes(rq)))
 				BUG();
 			*rqp = NULL;
-			return 0;
+			return false;
 		}
 	}
 
@@ -235,9 +267,9 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
 	/* Special requests are not subject to ordering rules. */
 	if (!blk_fs_request(rq) &&
 	    rq != &q->pre_flush_rq && rq != &q->post_flush_rq)
-		return 1;
+		return true;
 
-	if (q->ordered & QUEUE_ORDERED_TAG) {
+	if (q->ordered & QUEUE_ORDERED_BY_TAG) {
 		/* Ordered by tag.  Blocking the next barrier is enough. */
 		if (is_barrier && rq != &q->bar_rq)
 			*rqp = NULL;
|
||||||
*rqp = NULL;
|
*rqp = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
return 1;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void bio_end_empty_barrier(struct bio *bio, int err)
|
static void bio_end_empty_barrier(struct bio *bio, int err)
|
||||||
|
|
|
@@ -153,6 +153,9 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 			nbytes = bio->bi_size;
 		}
 
+		if (unlikely(rq->cmd_flags & REQ_QUIET))
+			set_bit(BIO_QUIET, &bio->bi_flags);
+
 		bio->bi_size -= nbytes;
 		bio->bi_sector += (nbytes >> 9);
 
@@ -265,8 +268,7 @@ void __generic_unplug_device(struct request_queue *q)
 {
 	if (unlikely(blk_queue_stopped(q)))
 		return;
-
-	if (!blk_remove_plug(q))
+	if (!blk_remove_plug(q) && !blk_queue_nonrot(q))
 		return;
 
 	q->request_fn(q);
|
||||||
void blk_sync_queue(struct request_queue *q)
|
void blk_sync_queue(struct request_queue *q)
|
||||||
{
|
{
|
||||||
del_timer_sync(&q->unplug_timer);
|
del_timer_sync(&q->unplug_timer);
|
||||||
kblockd_flush_work(&q->unplug_work);
|
del_timer_sync(&q->timeout);
|
||||||
|
cancel_work_sync(&q->unplug_work);
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(blk_sync_queue);
|
EXPORT_SYMBOL(blk_sync_queue);
|
||||||
|
|
||||||
|
@ -1135,7 +1138,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
|
||||||
static int __make_request(struct request_queue *q, struct bio *bio)
|
static int __make_request(struct request_queue *q, struct bio *bio)
|
||||||
{
|
{
|
||||||
struct request *req;
|
struct request *req;
|
||||||
int el_ret, nr_sectors, barrier, discard, err;
|
int el_ret, nr_sectors;
|
||||||
const unsigned short prio = bio_prio(bio);
|
const unsigned short prio = bio_prio(bio);
|
||||||
const int sync = bio_sync(bio);
|
const int sync = bio_sync(bio);
|
||||||
int rw_flags;
|
int rw_flags;
|
||||||
|
@@ -1149,22 +1152,9 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);
 
-	barrier = bio_barrier(bio);
-	if (unlikely(barrier) && bio_has_data(bio) &&
-	    (q->next_ordered == QUEUE_ORDERED_NONE)) {
-		err = -EOPNOTSUPP;
-		goto end_io;
-	}
-
-	discard = bio_discard(bio);
-	if (unlikely(discard) && !q->prepare_discard_fn) {
-		err = -EOPNOTSUPP;
-		goto end_io;
-	}
-
 	spin_lock_irq(q->queue_lock);
 
-	if (unlikely(barrier) || elv_queue_empty(q))
+	if (unlikely(bio_barrier(bio)) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
|
@ -1250,18 +1240,14 @@ static int __make_request(struct request_queue *q, struct bio *bio)
|
||||||
if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
|
if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
|
||||||
bio_flagged(bio, BIO_CPU_AFFINE))
|
bio_flagged(bio, BIO_CPU_AFFINE))
|
||||||
req->cpu = blk_cpu_to_group(smp_processor_id());
|
req->cpu = blk_cpu_to_group(smp_processor_id());
|
||||||
if (elv_queue_empty(q))
|
if (!blk_queue_nonrot(q) && elv_queue_empty(q))
|
||||||
blk_plug_device(q);
|
blk_plug_device(q);
|
||||||
add_request(q, req);
|
add_request(q, req);
|
||||||
out:
|
out:
|
||||||
if (sync)
|
if (sync || blk_queue_nonrot(q))
|
||||||
__generic_unplug_device(q);
|
__generic_unplug_device(q);
|
||||||
spin_unlock_irq(q->queue_lock);
|
spin_unlock_irq(q->queue_lock);
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
end_io:
|
|
||||||
bio_endio(bio, err);
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@@ -1414,15 +1400,13 @@ static inline void __generic_make_request(struct bio *bio)
 		char b[BDEVNAME_SIZE];
 
 		q = bdev_get_queue(bio->bi_bdev);
-		if (!q) {
+		if (unlikely(!q)) {
 			printk(KERN_ERR
 			       "generic_make_request: Trying to access "
 				"nonexistent block-device %s (%Lu)\n",
 				bdevname(bio->bi_bdev, b),
 				(long long) bio->bi_sector);
-end_io:
-			bio_endio(bio, err);
-			break;
+			goto end_io;
 		}
 
 		if (unlikely(nr_sectors > q->max_hw_sectors)) {
@@ -1459,14 +1443,19 @@ static inline void __generic_make_request(struct bio *bio)
 
 		if (bio_check_eod(bio, nr_sectors))
 			goto end_io;
-		if ((bio_empty_barrier(bio) && !q->prepare_flush_fn) ||
-		    (bio_discard(bio) && !q->prepare_discard_fn)) {
+
+		if (bio_discard(bio) && !q->prepare_discard_fn) {
 			err = -EOPNOTSUPP;
 			goto end_io;
 		}
 
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
+
+	return;
+
+end_io:
+	bio_endio(bio, err);
 }
 
 /*
@@ -1716,14 +1705,6 @@ static int __end_that_request_first(struct request *req, int error,
 	while ((bio = req->bio) != NULL) {
 		int nbytes;
 
-		/*
-		 * For an empty barrier request, the low level driver must
-		 * store a potential error location in ->sector. We pass
-		 * that back up in ->bi_sector.
-		 */
-		if (blk_empty_barrier(req))
-			bio->bi_sector = req->sector;
-
 		if (nr_bytes >= bio->bi_size) {
 			req->bio = bio->bi_next;
 			nbytes = bio->bi_size;
@@ -2143,12 +2124,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work)
 }
 EXPORT_SYMBOL(kblockd_schedule_work);
 
-void kblockd_flush_work(struct work_struct *work)
-{
-	cancel_work_sync(work);
-}
-EXPORT_SYMBOL(kblockd_flush_work);
-
 int __init blk_dev_init(void)
 {
 	kblockd_workqueue = create_workqueue("kblockd");
@@ -319,9 +319,9 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
 
-	t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
-	t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
-	t->max_segment_size = min(t->max_segment_size, b->max_segment_size);
+	t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments);
+	t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments);
+	t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size);
 	t->hardsect_size = max(t->hardsect_size, b->hardsect_size);
 	if (!t->queue_lock)
 		WARN_ON_ONCE(1);
@@ -161,7 +161,7 @@ void blk_complete_request(struct request *req)
 }
 EXPORT_SYMBOL(blk_complete_request);
 
-__init int blk_softirq_init(void)
+static __init int blk_softirq_init(void)
 {
 	int i;
 
@@ -88,9 +88,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	unsigned long ra_kb;
 	ssize_t ret = queue_var_store(&ra_kb, page, count);
 
-	spin_lock_irq(q->queue_lock);
 	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
-	spin_unlock_irq(q->queue_lock);
 
 	return ret;
 }
@@ -117,10 +115,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
-	/*
-	 * Take the queue lock to update the readahead and max_sectors
-	 * values synchronously:
-	 */
+
 	spin_lock_irq(q->queue_lock);
 	q->max_sectors = max_sectors_kb << 1;
 	spin_unlock_irq(q->queue_lock);
@@ -158,7 +158,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 /**
  * blk_init_tags - initialize the tag info for an external tag map
  * @depth:	the maximum queue depth supported
- * @tags: the tag to use
  **/
 struct blk_queue_tag *blk_init_tags(int depth)
 {
@@ -73,11 +73,7 @@ ssize_t part_timeout_store(struct device *dev, struct device_attribute *attr,
  */
 void blk_delete_timer(struct request *req)
 {
-	struct request_queue *q = req->q;
-
 	list_del_init(&req->timeout_list);
-	if (list_empty(&q->timeout_list))
-		del_timer(&q->timeout);
 }
 
 static void blk_rq_timed_out(struct request *req)
@@ -111,7 +107,7 @@ static void blk_rq_timed_out(struct request *req)
 void blk_rq_timed_out_timer(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *) data;
-	unsigned long flags, uninitialized_var(next), next_set = 0;
+	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
 
 	spin_lock_irqsave(q->queue_lock, flags);
@@ -126,15 +122,18 @@ void blk_rq_timed_out_timer(unsigned long data)
 			if (blk_mark_rq_complete(rq))
 				continue;
 			blk_rq_timed_out(rq);
+		} else {
+			if (!next || time_after(next, rq->deadline))
+				next = rq->deadline;
 		}
-		if (!next_set) {
-			next = rq->deadline;
-			next_set = 1;
-		} else if (time_after(next, rq->deadline))
-			next = rq->deadline;
 	}
 
-	if (next_set && !list_empty(&q->timeout_list))
+	/*
+	 * next can never be 0 here with the list non-empty, since we always
+	 * bump ->deadline to 1 so we can detect if the timer was ever added
+	 * or not. See comment in blk_add_timer()
+	 */
+	if (next)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
@@ -1136,12 +1136,8 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 		if (cfq_class_idle(cfqq))
 			max_dispatch = 1;
 
-		if (cfqq->dispatched >= max_dispatch) {
-			if (cfqd->busy_queues > 1)
-				break;
-			if (cfqq->dispatched >= 4 * max_dispatch)
-				break;
-		}
+		if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1)
+			break;
 
 		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
 			break;
@@ -1318,7 +1314,15 @@ static void cfq_exit_single_io_context(struct io_context *ioc,
 		unsigned long flags;
 
 		spin_lock_irqsave(q->queue_lock, flags);
-		__cfq_exit_single_io_context(cfqd, cic);
+
+		/*
+		 * Ensure we get a fresh copy of the ->key to prevent
+		 * race between exiting task and queue
+		 */
+		smp_read_barrier_depends();
+		if (cic->key)
+			__cfq_exit_single_io_context(cfqd, cic);
+
 		spin_unlock_irqrestore(q->queue_lock, flags);
 	}
 }
@@ -2160,7 +2164,7 @@ static void cfq_idle_slice_timer(unsigned long data)
 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 {
 	del_timer_sync(&cfqd->idle_slice_timer);
-	kblockd_flush_work(&cfqd->unplug_work);
+	cancel_work_sync(&cfqd->unplug_work);
 }
 
 static void cfq_put_async_queues(struct cfq_data *cfqd)
@@ -2178,7 +2182,7 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
 		cfq_put_queue(cfqd->async_idle_cfqq);
 }
 
-static void cfq_exit_queue(elevator_t *e)
+static void cfq_exit_queue(struct elevator_queue *e)
 {
 	struct cfq_data *cfqd = e->elevator_data;
 	struct request_queue *q = cfqd->queue;
@@ -2288,7 +2292,7 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(elevator_t *e, char *page) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
 { \
 	struct cfq_data *cfqd = e->elevator_data; \
 	unsigned int __data = __VAR; \
@@ -2308,7 +2312,7 @@ SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
 { \
 	struct cfq_data *cfqd = e->elevator_data; \
 	unsigned int __data; \
@@ -774,9 +774,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 		bdi = blk_get_backing_dev_info(bdev);
 		if (bdi == NULL)
 			return -ENOTTY;
-		lock_kernel();
 		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
-		unlock_kernel();
 		return 0;
 	case BLKGETSIZE:
 		size = bdev->bd_inode->i_size;
@@ -334,7 +334,7 @@ static int deadline_queue_empty(struct request_queue *q)
 		&& list_empty(&dd->fifo_list[READ]);
 }
 
-static void deadline_exit_queue(elevator_t *e)
+static void deadline_exit_queue(struct elevator_queue *e)
 {
 	struct deadline_data *dd = e->elevator_data;
 
@@ -387,7 +387,7 @@ deadline_var_store(int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
-static ssize_t __FUNC(elevator_t *e, char *page) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
 { \
 	struct deadline_data *dd = e->elevator_data; \
 	int __data = __VAR; \
@@ -403,7 +403,7 @@ SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
-static ssize_t __FUNC(elevator_t *e, const char *page, size_t count) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
 { \
 	struct deadline_data *dd = e->elevator_data; \
 	int __data; \
@@ -65,7 +65,7 @@ DEFINE_TRACE(block_rq_issue);
 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio)
 {
 	struct request_queue *q = rq->q;
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_allow_merge_fn)
 		return e->ops->elevator_allow_merge_fn(q, rq, bio);
@@ -208,13 +208,13 @@ __setup("elevator=", elevator_setup);
 
 static struct kobj_type elv_ktype;
 
-static elevator_t *elevator_alloc(struct request_queue *q,
+static struct elevator_queue *elevator_alloc(struct request_queue *q,
 				  struct elevator_type *e)
 {
-	elevator_t *eq;
+	struct elevator_queue *eq;
 	int i;
 
-	eq = kmalloc_node(sizeof(elevator_t), GFP_KERNEL | __GFP_ZERO, q->node);
+	eq = kmalloc_node(sizeof(*eq), GFP_KERNEL | __GFP_ZERO, q->node);
 	if (unlikely(!eq))
 		goto err;
 
@@ -240,8 +240,9 @@ static elevator_t *elevator_alloc(struct request_queue *q,
 
 static void elevator_release(struct kobject *kobj)
 {
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
+	struct elevator_queue *e;
 
+	e = container_of(kobj, struct elevator_queue, kobj);
 	elevator_put(e->elevator_type);
 	kfree(e->hash);
 	kfree(e);
@@ -297,7 +298,7 @@ int elevator_init(struct request_queue *q, char *name)
 }
 EXPORT_SYMBOL(elevator_init);
 
-void elevator_exit(elevator_t *e)
+void elevator_exit(struct elevator_queue *e)
 {
 	mutex_lock(&e->sysfs_lock);
 	if (e->ops->elevator_exit_fn)
@@ -311,7 +312,7 @@ EXPORT_SYMBOL(elevator_exit);
 
 static void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_activate_req_fn)
 		e->ops->elevator_activate_req_fn(q, rq);
@@ -319,7 +320,7 @@ static void elv_activate_rq(struct request_queue *q, struct request *rq)
 
 static void elv_deactivate_rq(struct request_queue *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_deactivate_req_fn)
 		e->ops->elevator_deactivate_req_fn(q, rq);
@@ -338,7 +339,7 @@ static void elv_rqhash_del(struct request_queue *q, struct request *rq)
 
 static void elv_rqhash_add(struct request_queue *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	BUG_ON(ELV_ON_HASH(rq));
 	hlist_add_head(&rq->hash, &e->hash[ELV_HASH_FN(rq_hash_key(rq))]);
@@ -352,7 +353,7 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 
 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 	struct hlist_head *hash_list = &e->hash[ELV_HASH_FN(offset)];
 	struct hlist_node *entry, *next;
 	struct request *rq;
@@ -494,7 +495,7 @@ EXPORT_SYMBOL(elv_dispatch_add_tail);
 
 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 	struct request *__rq;
 	int ret;
 
@@ -529,7 +530,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 
 void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_merged_fn)
 		e->ops->elevator_merged_fn(q, rq, type);
@@ -543,7 +544,7 @@ void elv_merged_request(struct request_queue *q, struct request *rq, int type)
 void elv_merge_requests(struct request_queue *q, struct request *rq,
 			     struct request *next)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_merge_req_fn)
 		e->ops->elevator_merge_req_fn(q, rq, next);
@@ -755,14 +756,6 @@ struct request *elv_next_request(struct request_queue *q)
 	int ret;
 
 	while ((rq = __elv_next_request(q)) != NULL) {
-		/*
-		 * Kill the empty barrier place holder, the driver must
-		 * not ever see it.
-		 */
-		if (blk_empty_barrier(rq)) {
-			__blk_end_request(rq, 0, blk_rq_bytes(rq));
-			continue;
-		}
 		if (!(rq->cmd_flags & REQ_STARTED)) {
 			/*
 			 * This is the first time the device driver
@@ -854,7 +847,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
 
 int elv_queue_empty(struct request_queue *q)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (!list_empty(&q->queue_head))
 		return 0;
@@ -868,7 +861,7 @@ EXPORT_SYMBOL(elv_queue_empty);
 
 struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_latter_req_fn)
 		return e->ops->elevator_latter_req_fn(q, rq);
@@ -877,7 +870,7 @@ struct request *elv_latter_request(struct request_queue *q, struct request *rq)
 
 struct request *elv_former_request(struct request_queue *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_former_req_fn)
 		return e->ops->elevator_former_req_fn(q, rq);
@@ -886,7 +879,7 @@ struct request *elv_former_request(struct request_queue *q, struct request *rq)
 
 int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_set_req_fn)
 		return e->ops->elevator_set_req_fn(q, rq, gfp_mask);
@@ -897,7 +890,7 @@ int elv_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 
 void elv_put_request(struct request_queue *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_put_req_fn)
 		e->ops->elevator_put_req_fn(rq);
@@ -905,7 +898,7 @@ void elv_put_request(struct request_queue *q, struct request *rq)
 
 int elv_may_queue(struct request_queue *q, int rw)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	if (e->ops->elevator_may_queue_fn)
 		return e->ops->elevator_may_queue_fn(q, rw);
@@ -928,7 +921,7 @@ EXPORT_SYMBOL(elv_abort_queue);
 
 void elv_completed_request(struct request_queue *q, struct request *rq)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 
 	/*
 	 * request is released from the driver, io must be done
@@ -944,10 +937,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 	 * drained for flush sequence.
 	 */
 	if (unlikely(q->ordseq)) {
-		struct request *first_rq = list_entry_rq(q->queue_head.next);
-		if (q->in_flight == 0 &&
+		struct request *next = NULL;
+
+		if (!list_empty(&q->queue_head))
+			next = list_entry_rq(q->queue_head.next);
+
+		if (!q->in_flight &&
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
-		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
+		    (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
 			blk_start_queueing(q);
 		}
@@ -959,13 +956,14 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 static ssize_t
 elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
 	struct elv_fs_entry *entry = to_elv(attr);
+	struct elevator_queue *e;
 	ssize_t error;
 
 	if (!entry->show)
 		return -EIO;
 
+	e = container_of(kobj, struct elevator_queue, kobj);
 	mutex_lock(&e->sysfs_lock);
 	error = e->ops ? entry->show(e, page) : -ENOENT;
 	mutex_unlock(&e->sysfs_lock);
@@ -976,13 +974,14 @@ static ssize_t
 elv_attr_store(struct kobject *kobj, struct attribute *attr,
 	       const char *page, size_t length)
 {
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
 	struct elv_fs_entry *entry = to_elv(attr);
+	struct elevator_queue *e;
 	ssize_t error;
 
 	if (!entry->store)
 		return -EIO;
 
+	e = container_of(kobj, struct elevator_queue, kobj);
 	mutex_lock(&e->sysfs_lock);
 	error = e->ops ? entry->store(e, page, length) : -ENOENT;
 	mutex_unlock(&e->sysfs_lock);
@@ -1001,7 +1000,7 @@ static struct kobj_type elv_ktype = {
 
 int elv_register_queue(struct request_queue *q)
 {
-	elevator_t *e = q->elevator;
+	struct elevator_queue *e = q->elevator;
 	int error;
 
 	error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
@@ -1019,7 +1018,7 @@ int elv_register_queue(struct request_queue *q)
 	return error;
 }
 
-static void __elv_unregister_queue(elevator_t *e)
+static void __elv_unregister_queue(struct elevator_queue *e)
 {
 	kobject_uevent(&e->kobj, KOBJ_REMOVE);
 	kobject_del(&e->kobj);
@@ -1082,7 +1081,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
  */
 static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 {
-	elevator_t *old_elevator, *e;
+	struct elevator_queue *old_elevator, *e;
 	void *data;
 
 	/*
|
||||||
|
|
||||||
ssize_t elv_iosched_show(struct request_queue *q, char *name)
|
ssize_t elv_iosched_show(struct request_queue *q, char *name)
|
||||||
{
|
{
|
||||||
elevator_t *e = q->elevator;
|
struct elevator_queue *e = q->elevator;
|
||||||
struct elevator_type *elv = e->elevator_type;
|
struct elevator_type *elv = e->elevator_type;
|
||||||
struct elevator_type *__e;
|
struct elevator_type *__e;
|
||||||
int len = 0;
|
int len = 0;
|
||||||
|
|
|
@@ -181,6 +181,12 @@ void disk_part_iter_exit(struct disk_part_iter *piter)
 }
 EXPORT_SYMBOL_GPL(disk_part_iter_exit);
 
+static inline int sector_in_part(struct hd_struct *part, sector_t sector)
+{
+	return part->start_sect <= sector &&
+		sector < part->start_sect + part->nr_sects;
+}
+
 /**
  * disk_map_sector_rcu - map sector to partition
  * @disk: gendisk of interest
@@ -199,16 +205,22 @@ EXPORT_SYMBOL_GPL(disk_part_iter_exit);
 struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector)
 {
 	struct disk_part_tbl *ptbl;
+	struct hd_struct *part;
 	int i;
 
 	ptbl = rcu_dereference(disk->part_tbl);
 
-	for (i = 1; i < ptbl->len; i++) {
-		struct hd_struct *part = rcu_dereference(ptbl->part[i]);
+	part = rcu_dereference(ptbl->last_lookup);
+	if (part && sector_in_part(part, sector))
+		return part;
 
-		if (part && part->start_sect <= sector &&
-		    sector < part->start_sect + part->nr_sects)
+	for (i = 1; i < ptbl->len; i++) {
+		part = rcu_dereference(ptbl->part[i]);
+
+		if (part && sector_in_part(part, sector)) {
+			rcu_assign_pointer(ptbl->last_lookup, part);
 			return part;
+		}
 	}
 	return &disk->part0;
 }
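The disk_map_sector_rcu() hunk above is the "one-hit cache for disk partition lookup" change: the partition returned by the previous lookup is stashed in last_lookup and tried before the linear scan, which repopulates the cache on a hit. Below is a self-contained toy version of the same idea; the range table, names and types are assumptions for illustration only, not kernel code.

/* Toy one-hit lookup cache: remember the last successful result and try it
 * before scanning the table again. */
#include <stdio.h>
#include <stddef.h>

struct range { long start, len; };

static struct range table[] = { {0, 100}, {100, 400}, {500, 250} };
static struct range *last_hit;	/* the one-entry "cache" */

static int in_range(const struct range *r, long x)
{
	return r->start <= x && x < r->start + r->len;
}

static struct range *lookup(long x)
{
	size_t i;

	if (last_hit && in_range(last_hit, x))	/* fast path: cached hit */
		return last_hit;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (in_range(&table[i], x)) {
			last_hit = &table[i];	/* remember for next time */
			return last_hit;
		}
	}
	return NULL;
}

int main(void)
{
	printf("%ld\n", lookup(120)->start);	/* scans, then caches */
	printf("%ld\n", lookup(450)->start);	/* served from the cache */
	return 0;
}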
@@ -888,8 +900,11 @@ static void disk_replace_part_tbl(struct gendisk *disk,
 	struct disk_part_tbl *old_ptbl = disk->part_tbl;
 
 	rcu_assign_pointer(disk->part_tbl, new_ptbl);
-	if (old_ptbl)
+
+	if (old_ptbl) {
+		rcu_assign_pointer(old_ptbl->last_lookup, NULL);
 		call_rcu(&old_ptbl->rcu_head, disk_free_ptbl_rcu_cb);
+	}
 }
 
 /**
@@ -323,9 +323,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 		bdi = blk_get_backing_dev_info(bdev);
 		if (bdi == NULL)
 			return -ENOTTY;
-		lock_kernel();
 		bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
-		unlock_kernel();
 		return 0;
 	case BLKBSZSET:
 		/* set the logical block size */
@@ -76,7 +76,7 @@ static void *noop_init_queue(struct request_queue *q)
 	return nd;
 }
 
-static void noop_exit_queue(elevator_t *e)
+static void noop_exit_queue(struct elevator_queue *e)
 {
 	struct noop_data *nd = e->elevator_data;
 
@@ -60,7 +60,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
 
 static int sg_get_timeout(struct request_queue *q)
 {
-	return q->sg_timeout / (HZ / USER_HZ);
+	return jiffies_to_clock_t(q->sg_timeout);
 }
 
 static int sg_set_timeout(struct request_queue *q, int __user *p)
@@ -68,7 +68,7 @@ static int sg_set_timeout(struct request_queue *q, int __user *p)
 	int timeout, err = get_user(timeout, p);
 
 	if (!err)
-		q->sg_timeout = timeout * (HZ / USER_HZ);
+		q->sg_timeout = clock_t_to_jiffies(timeout);
 
 	return err;
 }
@@ -164,7 +164,7 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 
 static int cciss_revalidate(struct gendisk *disk);
 static int rebuild_lun_table(ctlr_info_t *h, int first_time);
-static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
+static int deregister_disk(ctlr_info_t *h, int drv_index,
 			   int clear_all);
 
 static void cciss_read_capacity(int ctlr, int logvol, int withirq,
@@ -215,31 +215,17 @@ static struct block_device_operations cciss_fops = {
 /*
  * Enqueuing and dequeuing functions for cmdlists.
  */
-static inline void addQ(CommandList_struct **Qptr, CommandList_struct *c)
+static inline void addQ(struct hlist_head *list, CommandList_struct *c)
 {
-	if (*Qptr == NULL) {
-		*Qptr = c;
-		c->next = c->prev = c;
-	} else {
-		c->prev = (*Qptr)->prev;
-		c->next = (*Qptr);
-		(*Qptr)->prev->next = c;
-		(*Qptr)->prev = c;
-	}
+	hlist_add_head(&c->list, list);
 }
 
-static inline CommandList_struct *removeQ(CommandList_struct **Qptr,
-					  CommandList_struct *c)
+static inline void removeQ(CommandList_struct *c)
 {
-	if (c && c->next != c) {
-		if (*Qptr == c)
-			*Qptr = c->next;
-		c->prev->next = c->next;
-		c->next->prev = c->prev;
-	} else {
-		*Qptr = NULL;
-	}
-	return c;
+	if (WARN_ON(hlist_unhashed(&c->list)))
+		return;
+
+	hlist_del_init(&c->list);
 }
 
 #include "cciss_scsi.c"		/* For SCSI tape support */
@ -506,6 +492,7 @@ static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool)
|
||||||
c->cmdindex = i;
|
c->cmdindex = i;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
INIT_HLIST_NODE(&c->list);
|
||||||
c->busaddr = (__u32) cmd_dma_handle;
|
c->busaddr = (__u32) cmd_dma_handle;
|
||||||
temp64.val = (__u64) err_dma_handle;
|
temp64.val = (__u64) err_dma_handle;
|
||||||
c->ErrDesc.Addr.lower = temp64.val32.lower;
|
c->ErrDesc.Addr.lower = temp64.val32.lower;
|
||||||
|
@ -1492,8 +1479,7 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
|
||||||
* which keeps the interrupt handler from starting
|
* which keeps the interrupt handler from starting
|
||||||
* the queue.
|
* the queue.
|
||||||
*/
|
*/
|
||||||
ret = deregister_disk(h->gendisk[drv_index],
|
ret = deregister_disk(h, drv_index, 0);
|
||||||
&h->drv[drv_index], 0);
|
|
||||||
h->drv[drv_index].busy_configuring = 0;
|
h->drv[drv_index].busy_configuring = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -1711,8 +1697,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
|
||||||
spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
|
spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
|
||||||
h->drv[i].busy_configuring = 1;
|
h->drv[i].busy_configuring = 1;
|
||||||
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
|
spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
|
||||||
return_code = deregister_disk(h->gendisk[i],
|
return_code = deregister_disk(h, i, 1);
|
||||||
&h->drv[i], 1);
|
|
||||||
h->drv[i].busy_configuring = 0;
|
h->drv[i].busy_configuring = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -1782,15 +1767,19 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
|
||||||
* the highest_lun should be left unchanged and the LunID
|
* the highest_lun should be left unchanged and the LunID
|
||||||
* should not be cleared.
|
* should not be cleared.
|
||||||
*/
|
*/
|
||||||
static int deregister_disk(struct gendisk *disk, drive_info_struct *drv,
|
static int deregister_disk(ctlr_info_t *h, int drv_index,
|
||||||
int clear_all)
|
int clear_all)
|
||||||
{
|
{
|
||||||
int i;
|
int i;
|
||||||
ctlr_info_t *h = get_host(disk);
|
struct gendisk *disk;
|
||||||
|
drive_info_struct *drv;
|
||||||
|
|
||||||
if (!capable(CAP_SYS_RAWIO))
|
if (!capable(CAP_SYS_RAWIO))
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
|
drv = &h->drv[drv_index];
|
||||||
|
disk = h->gendisk[drv_index];
|
||||||
|
|
||||||
/* make sure logical volume is NOT is use */
|
/* make sure logical volume is NOT is use */
|
||||||
if (clear_all || (h->gendisk[0] == disk)) {
|
if (clear_all || (h->gendisk[0] == disk)) {
|
||||||
if (drv->usage_count > 1)
|
if (drv->usage_count > 1)
|
||||||
|
@ -2548,7 +2537,8 @@ static void start_io(ctlr_info_t *h)
|
||||||
{
|
{
|
||||||
CommandList_struct *c;
|
CommandList_struct *c;
|
||||||
|
|
||||||
while ((c = h->reqQ) != NULL) {
|
while (!hlist_empty(&h->reqQ)) {
|
||||||
|
c = hlist_entry(h->reqQ.first, CommandList_struct, list);
|
||||||
/* can't do anything if fifo is full */
|
/* can't do anything if fifo is full */
|
||||||
if ((h->access.fifo_full(h))) {
|
if ((h->access.fifo_full(h))) {
|
||||||
printk(KERN_WARNING "cciss: fifo full\n");
|
printk(KERN_WARNING "cciss: fifo full\n");
|
||||||
|
@ -2556,14 +2546,14 @@ static void start_io(ctlr_info_t *h)
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Get the first entry from the Request Q */
|
/* Get the first entry from the Request Q */
|
||||||
removeQ(&(h->reqQ), c);
|
removeQ(c);
|
||||||
h->Qdepth--;
|
h->Qdepth--;
|
||||||
|
|
||||||
/* Tell the controller execute command */
|
/* Tell the controller execute command */
|
||||||
h->access.submit_command(h, c);
|
h->access.submit_command(h, c);
|
||||||
|
|
||||||
/* Put job onto the completed Q */
|
/* Put job onto the completed Q */
|
||||||
addQ(&(h->cmpQ), c);
|
addQ(&h->cmpQ, c);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -2576,7 +2566,7 @@ static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c)
|
||||||
memset(c->err_info, 0, sizeof(ErrorInfo_struct));
|
memset(c->err_info, 0, sizeof(ErrorInfo_struct));
|
||||||
|
|
||||||
/* add it to software queue and then send it to the controller */
|
/* add it to software queue and then send it to the controller */
|
||||||
addQ(&(h->reqQ), c);
|
addQ(&h->reqQ, c);
|
||||||
h->Qdepth++;
|
h->Qdepth++;
|
||||||
if (h->Qdepth > h->maxQsinceinit)
|
if (h->Qdepth > h->maxQsinceinit)
|
||||||
h->maxQsinceinit = h->Qdepth;
|
h->maxQsinceinit = h->Qdepth;
|
||||||
|
@ -2897,7 +2887,7 @@ static void do_cciss_request(struct request_queue *q)
|
||||||
|
|
||||||
spin_lock_irq(q->queue_lock);
|
spin_lock_irq(q->queue_lock);
|
||||||
|
|
||||||
addQ(&(h->reqQ), c);
|
addQ(&h->reqQ, c);
|
||||||
h->Qdepth++;
|
h->Qdepth++;
|
||||||
if (h->Qdepth > h->maxQsinceinit)
|
if (h->Qdepth > h->maxQsinceinit)
|
||||||
h->maxQsinceinit = h->Qdepth;
|
h->maxQsinceinit = h->Qdepth;
|
||||||
|
@ -2985,16 +2975,12 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
|
||||||
a = c->busaddr;
|
a = c->busaddr;
|
||||||
|
|
||||||
} else {
|
} else {
|
||||||
|
struct hlist_node *tmp;
|
||||||
|
|
||||||
a &= ~3;
|
a &= ~3;
|
||||||
if ((c = h->cmpQ) == NULL) {
|
c = NULL;
|
||||||
printk(KERN_WARNING
|
hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
|
||||||
"cciss: Completion of %08x ignored\n",
|
if (c->busaddr == a)
|
||||||
a1);
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
while (c->busaddr != a) {
|
|
||||||
c = c->next;
|
|
||||||
if (c == h->cmpQ)
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -3002,8 +2988,8 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
|
||||||
* If we've found the command, take it off the
|
* If we've found the command, take it off the
|
||||||
* completion Q and free it
|
* completion Q and free it
|
||||||
*/
|
*/
|
||||||
if (c->busaddr == a) {
|
if (c && c->busaddr == a) {
|
||||||
removeQ(&h->cmpQ, c);
|
removeQ(c);
|
||||||
if (c->cmd_type == CMD_RWREQ) {
|
if (c->cmd_type == CMD_RWREQ) {
|
||||||
complete_command(h, c, 0);
|
complete_command(h, c, 0);
|
||||||
} else if (c->cmd_type == CMD_IOCTL_PEND) {
|
} else if (c->cmd_type == CMD_IOCTL_PEND) {
|
||||||
|
@ -3423,6 +3409,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
hba[i]->busy_initializing = 1;
|
hba[i]->busy_initializing = 1;
|
||||||
|
INIT_HLIST_HEAD(&hba[i]->cmpQ);
|
||||||
|
INIT_HLIST_HEAD(&hba[i]->reqQ);
|
||||||
|
|
||||||
if (cciss_pci_init(hba[i], pdev) != 0)
|
if (cciss_pci_init(hba[i], pdev) != 0)
|
||||||
goto clean1;
|
goto clean1;
|
||||||
|
@ -3730,15 +3718,17 @@ static void fail_all_cmds(unsigned long ctlr)
|
||||||
pci_disable_device(h->pdev); /* Make sure it is really dead. */
|
pci_disable_device(h->pdev); /* Make sure it is really dead. */
|
||||||
|
|
||||||
/* move everything off the request queue onto the completed queue */
|
/* move everything off the request queue onto the completed queue */
|
||||||
while ((c = h->reqQ) != NULL) {
|
while (!hlist_empty(&h->reqQ)) {
|
||||||
removeQ(&(h->reqQ), c);
|
c = hlist_entry(h->reqQ.first, CommandList_struct, list);
|
||||||
|
removeQ(c);
|
||||||
h->Qdepth--;
|
h->Qdepth--;
|
||||||
addQ(&(h->cmpQ), c);
|
addQ(&h->cmpQ, c);
|
||||||
}
|
}
|
||||||
|
|
||||||
/* Now, fail everything on the completed queue with a HW error */
|
/* Now, fail everything on the completed queue with a HW error */
|
||||||
while ((c = h->cmpQ) != NULL) {
|
while (!hlist_empty(&h->cmpQ)) {
|
||||||
removeQ(&h->cmpQ, c);
|
c = hlist_entry(h->cmpQ.first, CommandList_struct, list);
|
||||||
|
removeQ(c);
|
||||||
c->err_info->CommandStatus = CMD_HARDWARE_ERR;
|
c->err_info->CommandStatus = CMD_HARDWARE_ERR;
|
||||||
if (c->cmd_type == CMD_RWREQ) {
|
if (c->cmd_type == CMD_RWREQ) {
|
||||||
complete_command(h, c, 0);
|
complete_command(h, c, 0);
|
||||||
|
|
|
@ -89,8 +89,8 @@ struct ctlr_info
|
||||||
struct access_method access;
|
struct access_method access;
|
||||||
|
|
||||||
/* queue and queue Info */
|
/* queue and queue Info */
|
||||||
CommandList_struct *reqQ;
|
struct hlist_head reqQ;
|
||||||
CommandList_struct *cmpQ;
|
struct hlist_head cmpQ;
|
||||||
unsigned int Qdepth;
|
unsigned int Qdepth;
|
||||||
unsigned int maxQsinceinit;
|
unsigned int maxQsinceinit;
|
||||||
unsigned int maxSG;
|
unsigned int maxSG;
|
||||||
|
|
|
@ -265,8 +265,7 @@ typedef struct _CommandList_struct {
|
||||||
int ctlr;
|
int ctlr;
|
||||||
int cmd_type;
|
int cmd_type;
|
||||||
long cmdindex;
|
long cmdindex;
|
||||||
struct _CommandList_struct *prev;
|
struct hlist_node list;
|
||||||
struct _CommandList_struct *next;
|
|
||||||
struct request * rq;
|
struct request * rq;
|
||||||
struct completion *waiting;
|
struct completion *waiting;
|
||||||
int retry_count;
|
int retry_count;
|
||||||
|
|
|
@ -623,6 +623,18 @@ static int loop_switch(struct loop_device *lo, struct file *file)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Helper to flush the IOs in loop, but keeping loop thread running
|
||||||
|
*/
|
||||||
|
static int loop_flush(struct loop_device *lo)
|
||||||
|
{
|
||||||
|
/* loop not yet configured, no running thread, nothing to flush */
|
||||||
|
if (!lo->lo_thread)
|
||||||
|
return 0;
|
||||||
|
|
||||||
|
return loop_switch(lo, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Do the actual switch; called from the BIO completion routine
|
* Do the actual switch; called from the BIO completion routine
|
||||||
*/
|
*/
|
||||||
|
@ -630,14 +642,20 @@ static void do_loop_switch(struct loop_device *lo, struct switch_request *p)
|
||||||
{
|
{
|
||||||
struct file *file = p->file;
|
struct file *file = p->file;
|
||||||
struct file *old_file = lo->lo_backing_file;
|
struct file *old_file = lo->lo_backing_file;
|
||||||
struct address_space *mapping = file->f_mapping;
|
struct address_space *mapping;
|
||||||
|
|
||||||
|
/* if no new file, only flush of queued bios requested */
|
||||||
|
if (!file)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
mapping = file->f_mapping;
|
||||||
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
|
mapping_set_gfp_mask(old_file->f_mapping, lo->old_gfp_mask);
|
||||||
lo->lo_backing_file = file;
|
lo->lo_backing_file = file;
|
||||||
lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
|
lo->lo_blocksize = S_ISBLK(mapping->host->i_mode) ?
|
||||||
mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
|
mapping->host->i_bdev->bd_block_size : PAGE_SIZE;
|
||||||
lo->old_gfp_mask = mapping_gfp_mask(mapping);
|
lo->old_gfp_mask = mapping_gfp_mask(mapping);
|
||||||
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
|
mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS));
|
||||||
|
out:
|
||||||
complete(&p->wait);
|
complete(&p->wait);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -901,6 +919,7 @@ static int loop_clr_fd(struct loop_device *lo, struct block_device *bdev)
|
||||||
|
|
||||||
kthread_stop(lo->lo_thread);
|
kthread_stop(lo->lo_thread);
|
||||||
|
|
||||||
|
lo->lo_queue->unplug_fn = NULL;
|
||||||
lo->lo_backing_file = NULL;
|
lo->lo_backing_file = NULL;
|
||||||
|
|
||||||
loop_release_xfer(lo);
|
loop_release_xfer(lo);
|
||||||
|
@ -1345,11 +1364,25 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
|
||||||
struct loop_device *lo = disk->private_data;
|
struct loop_device *lo = disk->private_data;
|
||||||
|
|
||||||
mutex_lock(&lo->lo_ctl_mutex);
|
mutex_lock(&lo->lo_ctl_mutex);
|
||||||
--lo->lo_refcnt;
|
|
||||||
|
|
||||||
if ((lo->lo_flags & LO_FLAGS_AUTOCLEAR) && !lo->lo_refcnt)
|
if (--lo->lo_refcnt)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
if (lo->lo_flags & LO_FLAGS_AUTOCLEAR) {
|
||||||
|
/*
|
||||||
|
* In autoclear mode, stop the loop thread
|
||||||
|
* and remove configuration after last close.
|
||||||
|
*/
|
||||||
loop_clr_fd(lo, NULL);
|
loop_clr_fd(lo, NULL);
|
||||||
|
} else {
|
||||||
|
/*
|
||||||
|
* Otherwise keep thread (if running) and config,
|
||||||
|
* but flush possible ongoing bios in thread.
|
||||||
|
*/
|
||||||
|
loop_flush(lo);
|
||||||
|
}
|
||||||
|
|
||||||
|
out:
|
||||||
mutex_unlock(&lo->lo_ctl_mutex);
|
mutex_unlock(&lo->lo_ctl_mutex);
|
||||||
|
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
@ -722,7 +722,6 @@ static int __init nbd_init(void)
|
||||||
|
|
||||||
for (i = 0; i < nbds_max; i++) {
|
for (i = 0; i < nbds_max; i++) {
|
||||||
struct gendisk *disk = alloc_disk(1 << part_shift);
|
struct gendisk *disk = alloc_disk(1 << part_shift);
|
||||||
elevator_t *old_e;
|
|
||||||
if (!disk)
|
if (!disk)
|
||||||
goto out;
|
goto out;
|
||||||
nbd_dev[i].disk = disk;
|
nbd_dev[i].disk = disk;
|
||||||
|
@ -736,11 +735,10 @@ static int __init nbd_init(void)
|
||||||
put_disk(disk);
|
put_disk(disk);
|
||||||
goto out;
|
goto out;
|
||||||
}
|
}
|
||||||
old_e = disk->queue->elevator;
|
/*
|
||||||
if (elevator_init(disk->queue, "deadline") == 0 ||
|
* Tell the block layer that we are not a rotational device
|
||||||
elevator_init(disk->queue, "noop") == 0) {
|
*/
|
||||||
elevator_exit(old_e);
|
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue);
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (register_blkdev(NBD_MAJOR, "nbd")) {
|
if (register_blkdev(NBD_MAJOR, "nbd")) {
|
||||||
|
|
|
@ -237,6 +237,8 @@ static int virtblk_probe(struct virtio_device *vdev)
|
||||||
goto out_put_disk;
|
goto out_put_disk;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue);
|
||||||
|
|
||||||
if (index < 26) {
|
if (index < 26) {
|
||||||
sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
|
sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
|
||||||
} else if (index < (26 + 1) * 26) {
|
} else if (index < (26 + 1) * 26) {
|
||||||
|
|
|
@ -338,18 +338,12 @@ static void do_blkif_request(struct request_queue *rq)
|
||||||
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
|
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
|
||||||
{
|
{
|
||||||
struct request_queue *rq;
|
struct request_queue *rq;
|
||||||
elevator_t *old_e;
|
|
||||||
|
|
||||||
rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
|
rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
|
||||||
if (rq == NULL)
|
if (rq == NULL)
|
||||||
return -1;
|
return -1;
|
||||||
|
|
||||||
old_e = rq->elevator;
|
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
|
||||||
if (IS_ERR_VALUE(elevator_init(rq, "noop")))
|
|
||||||
printk(KERN_WARNING
|
|
||||||
"blkfront: Switch elevator failed, use default\n");
|
|
||||||
else
|
|
||||||
elevator_exit(old_e);
|
|
||||||
|
|
||||||
/* Hard sector size and max sectors impersonate the equiv. hardware. */
|
/* Hard sector size and max sectors impersonate the equiv. hardware. */
|
||||||
blk_queue_hardsect_size(rq, sector_size);
|
blk_queue_hardsect_size(rq, sector_size);
|
||||||
|
|
|
@ -1712,29 +1712,30 @@ static int dvd_do_auth(struct cdrom_device_info *cdi, dvd_authinfo *ai)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
|
static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s,
|
||||||
|
struct packet_command *cgc)
|
||||||
{
|
{
|
||||||
unsigned char buf[21], *base;
|
unsigned char buf[21], *base;
|
||||||
struct dvd_layer *layer;
|
struct dvd_layer *layer;
|
||||||
struct packet_command cgc;
|
|
||||||
struct cdrom_device_ops *cdo = cdi->ops;
|
struct cdrom_device_ops *cdo = cdi->ops;
|
||||||
int ret, layer_num = s->physical.layer_num;
|
int ret, layer_num = s->physical.layer_num;
|
||||||
|
|
||||||
if (layer_num >= DVD_LAYERS)
|
if (layer_num >= DVD_LAYERS)
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
|
init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
|
||||||
cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
||||||
cgc.cmd[6] = layer_num;
|
cgc->cmd[6] = layer_num;
|
||||||
cgc.cmd[7] = s->type;
|
cgc->cmd[7] = s->type;
|
||||||
cgc.cmd[9] = cgc.buflen & 0xff;
|
cgc->cmd[9] = cgc->buflen & 0xff;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* refrain from reporting errors on non-existing layers (mainly)
|
* refrain from reporting errors on non-existing layers (mainly)
|
||||||
*/
|
*/
|
||||||
cgc.quiet = 1;
|
cgc->quiet = 1;
|
||||||
|
|
||||||
if ((ret = cdo->generic_packet(cdi, &cgc)))
|
ret = cdo->generic_packet(cdi, cgc);
|
||||||
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
base = &buf[4];
|
base = &buf[4];
|
||||||
|
@ -1762,21 +1763,22 @@ static int dvd_read_physical(struct cdrom_device_info *cdi, dvd_struct *s)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
|
static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s,
|
||||||
|
struct packet_command *cgc)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret;
|
||||||
u_char buf[8];
|
u_char buf[8];
|
||||||
struct packet_command cgc;
|
|
||||||
struct cdrom_device_ops *cdo = cdi->ops;
|
struct cdrom_device_ops *cdo = cdi->ops;
|
||||||
|
|
||||||
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
|
init_cdrom_command(cgc, buf, sizeof(buf), CGC_DATA_READ);
|
||||||
cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
||||||
cgc.cmd[6] = s->copyright.layer_num;
|
cgc->cmd[6] = s->copyright.layer_num;
|
||||||
cgc.cmd[7] = s->type;
|
cgc->cmd[7] = s->type;
|
||||||
cgc.cmd[8] = cgc.buflen >> 8;
|
cgc->cmd[8] = cgc->buflen >> 8;
|
||||||
cgc.cmd[9] = cgc.buflen & 0xff;
|
cgc->cmd[9] = cgc->buflen & 0xff;
|
||||||
|
|
||||||
if ((ret = cdo->generic_packet(cdi, &cgc)))
|
ret = cdo->generic_packet(cdi, cgc);
|
||||||
|
if (ret)
|
||||||
return ret;
|
return ret;
|
||||||
|
|
||||||
s->copyright.cpst = buf[4];
|
s->copyright.cpst = buf[4];
|
||||||
|
@ -1785,79 +1787,89 @@ static int dvd_read_copyright(struct cdrom_device_info *cdi, dvd_struct *s)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s)
|
static int dvd_read_disckey(struct cdrom_device_info *cdi, dvd_struct *s,
|
||||||
|
struct packet_command *cgc)
|
||||||
{
|
{
|
||||||
int ret, size;
|
int ret, size;
|
||||||
u_char *buf;
|
u_char *buf;
|
||||||
struct packet_command cgc;
|
|
||||||
struct cdrom_device_ops *cdo = cdi->ops;
|
struct cdrom_device_ops *cdo = cdi->ops;
|
||||||
|
|
||||||
size = sizeof(s->disckey.value) + 4;
|
size = sizeof(s->disckey.value) + 4;
|
||||||
|
|
||||||
if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
|
buf = kmalloc(size, GFP_KERNEL);
|
||||||
|
if (!buf)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
|
init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
|
||||||
cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
||||||
cgc.cmd[7] = s->type;
|
cgc->cmd[7] = s->type;
|
||||||
cgc.cmd[8] = size >> 8;
|
cgc->cmd[8] = size >> 8;
|
||||||
cgc.cmd[9] = size & 0xff;
|
cgc->cmd[9] = size & 0xff;
|
||||||
cgc.cmd[10] = s->disckey.agid << 6;
|
cgc->cmd[10] = s->disckey.agid << 6;
|
||||||
|
|
||||||
if (!(ret = cdo->generic_packet(cdi, &cgc)))
|
ret = cdo->generic_packet(cdi, cgc);
|
||||||
|
if (!ret)
|
||||||
memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value));
|
memcpy(s->disckey.value, &buf[4], sizeof(s->disckey.value));
|
||||||
|
|
||||||
kfree(buf);
|
kfree(buf);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s)
|
static int dvd_read_bca(struct cdrom_device_info *cdi, dvd_struct *s,
|
||||||
|
struct packet_command *cgc)
|
||||||
{
|
{
|
||||||
int ret;
|
int ret, size = 4 + 188;
|
||||||
u_char buf[4 + 188];
|
u_char *buf;
|
||||||
struct packet_command cgc;
|
|
||||||
struct cdrom_device_ops *cdo = cdi->ops;
|
struct cdrom_device_ops *cdo = cdi->ops;
|
||||||
|
|
||||||
init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_READ);
|
buf = kmalloc(size, GFP_KERNEL);
|
||||||
cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
if (!buf)
|
||||||
cgc.cmd[7] = s->type;
|
return -ENOMEM;
|
||||||
cgc.cmd[9] = cgc.buflen & 0xff;
|
|
||||||
|
|
||||||
if ((ret = cdo->generic_packet(cdi, &cgc)))
|
init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
|
||||||
return ret;
|
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
||||||
|
cgc->cmd[7] = s->type;
|
||||||
|
cgc->cmd[9] = cgc->buflen & 0xff;
|
||||||
|
|
||||||
|
ret = cdo->generic_packet(cdi, cgc);
|
||||||
|
if (ret)
|
||||||
|
goto out;
|
||||||
|
|
||||||
s->bca.len = buf[0] << 8 | buf[1];
|
s->bca.len = buf[0] << 8 | buf[1];
|
||||||
if (s->bca.len < 12 || s->bca.len > 188) {
|
if (s->bca.len < 12 || s->bca.len > 188) {
|
||||||
cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
|
cdinfo(CD_WARNING, "Received invalid BCA length (%d)\n", s->bca.len);
|
||||||
return -EIO;
|
ret = -EIO;
|
||||||
|
goto out;
|
||||||
}
|
}
|
||||||
memcpy(s->bca.value, &buf[4], s->bca.len);
|
memcpy(s->bca.value, &buf[4], s->bca.len);
|
||||||
|
ret = 0;
|
||||||
return 0;
|
out:
|
||||||
|
kfree(buf);
|
||||||
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
|
static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s,
|
||||||
|
struct packet_command *cgc)
|
||||||
{
|
{
|
||||||
int ret = 0, size;
|
int ret = 0, size;
|
||||||
u_char *buf;
|
u_char *buf;
|
||||||
struct packet_command cgc;
|
|
||||||
struct cdrom_device_ops *cdo = cdi->ops;
|
struct cdrom_device_ops *cdo = cdi->ops;
|
||||||
|
|
||||||
size = sizeof(s->manufact.value) + 4;
|
size = sizeof(s->manufact.value) + 4;
|
||||||
|
|
||||||
if ((buf = kmalloc(size, GFP_KERNEL)) == NULL)
|
buf = kmalloc(size, GFP_KERNEL);
|
||||||
|
if (!buf)
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
|
|
||||||
init_cdrom_command(&cgc, buf, size, CGC_DATA_READ);
|
init_cdrom_command(cgc, buf, size, CGC_DATA_READ);
|
||||||
cgc.cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
cgc->cmd[0] = GPCMD_READ_DVD_STRUCTURE;
|
||||||
cgc.cmd[7] = s->type;
|
cgc->cmd[7] = s->type;
|
||||||
cgc.cmd[8] = size >> 8;
|
cgc->cmd[8] = size >> 8;
|
||||||
cgc.cmd[9] = size & 0xff;
|
cgc->cmd[9] = size & 0xff;
|
||||||
|
|
||||||
if ((ret = cdo->generic_packet(cdi, &cgc))) {
|
ret = cdo->generic_packet(cdi, cgc);
|
||||||
kfree(buf);
|
if (ret)
|
||||||
return ret;
|
goto out;
|
||||||
}
|
|
||||||
|
|
||||||
s->manufact.len = buf[0] << 8 | buf[1];
|
s->manufact.len = buf[0] << 8 | buf[1];
|
||||||
if (s->manufact.len < 0 || s->manufact.len > 2048) {
|
if (s->manufact.len < 0 || s->manufact.len > 2048) {
|
||||||
|
@ -1868,27 +1880,29 @@ static int dvd_read_manufact(struct cdrom_device_info *cdi, dvd_struct *s)
|
||||||
memcpy(s->manufact.value, &buf[4], s->manufact.len);
|
memcpy(s->manufact.value, &buf[4], s->manufact.len);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
out:
|
||||||
kfree(buf);
|
kfree(buf);
|
||||||
return ret;
|
return ret;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s)
|
static int dvd_read_struct(struct cdrom_device_info *cdi, dvd_struct *s,
|
||||||
|
struct packet_command *cgc)
|
||||||
{
|
{
|
||||||
switch (s->type) {
|
switch (s->type) {
|
||||||
case DVD_STRUCT_PHYSICAL:
|
case DVD_STRUCT_PHYSICAL:
|
||||||
return dvd_read_physical(cdi, s);
|
return dvd_read_physical(cdi, s, cgc);
|
||||||
|
|
||||||
case DVD_STRUCT_COPYRIGHT:
|
case DVD_STRUCT_COPYRIGHT:
|
||||||
return dvd_read_copyright(cdi, s);
|
return dvd_read_copyright(cdi, s, cgc);
|
||||||
|
|
||||||
case DVD_STRUCT_DISCKEY:
|
case DVD_STRUCT_DISCKEY:
|
||||||
return dvd_read_disckey(cdi, s);
|
return dvd_read_disckey(cdi, s, cgc);
|
||||||
|
|
||||||
case DVD_STRUCT_BCA:
|
case DVD_STRUCT_BCA:
|
||||||
return dvd_read_bca(cdi, s);
|
return dvd_read_bca(cdi, s, cgc);
|
||||||
|
|
||||||
case DVD_STRUCT_MANUFACT:
|
case DVD_STRUCT_MANUFACT:
|
||||||
return dvd_read_manufact(cdi, s);
|
return dvd_read_manufact(cdi, s, cgc);
|
||||||
|
|
||||||
default:
|
default:
|
||||||
cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
|
cdinfo(CD_WARNING, ": Invalid DVD structure read requested (%d)\n",
|
||||||
|
@ -2787,14 +2801,324 @@ static int cdrom_switch_blocksize(struct cdrom_device_info *cdi, int size)
|
||||||
return cdo->generic_packet(cdi, &cgc);
|
return cdo->generic_packet(cdi, &cgc);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg,
|
||||||
|
struct packet_command *cgc,
|
||||||
|
int cmd)
|
||||||
|
{
|
||||||
|
struct request_sense sense;
|
||||||
|
struct cdrom_msf msf;
|
||||||
|
int blocksize = 0, format = 0, lba;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
switch (cmd) {
|
||||||
|
case CDROMREADRAW:
|
||||||
|
blocksize = CD_FRAMESIZE_RAW;
|
||||||
|
break;
|
||||||
|
case CDROMREADMODE1:
|
||||||
|
blocksize = CD_FRAMESIZE;
|
||||||
|
format = 2;
|
||||||
|
break;
|
||||||
|
case CDROMREADMODE2:
|
||||||
|
blocksize = CD_FRAMESIZE_RAW0;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
IOCTL_IN(arg, struct cdrom_msf, msf);
|
||||||
|
lba = msf_to_lba(msf.cdmsf_min0, msf.cdmsf_sec0, msf.cdmsf_frame0);
|
||||||
|
/* FIXME: we need upper bound checking, too!! */
|
||||||
|
if (lba < 0)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
cgc->buffer = kmalloc(blocksize, GFP_KERNEL);
|
||||||
|
if (cgc->buffer == NULL)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
memset(&sense, 0, sizeof(sense));
|
||||||
|
cgc->sense = &sense;
|
||||||
|
cgc->data_direction = CGC_DATA_READ;
|
||||||
|
ret = cdrom_read_block(cdi, cgc, lba, 1, format, blocksize);
|
||||||
|
if (ret && sense.sense_key == 0x05 &&
|
||||||
|
sense.asc == 0x20 &&
|
||||||
|
sense.ascq == 0x00) {
|
||||||
|
/*
|
||||||
|
* SCSI-II devices are not required to support
|
||||||
|
* READ_CD, so let's try switching block size
|
||||||
|
*/
|
||||||
|
/* FIXME: switch back again... */
|
||||||
|
ret = cdrom_switch_blocksize(cdi, blocksize);
|
||||||
|
if (ret)
|
||||||
|
goto out;
|
||||||
|
cgc->sense = NULL;
|
||||||
|
ret = cdrom_read_cd(cdi, cgc, lba, blocksize, 1);
|
||||||
|
ret |= cdrom_switch_blocksize(cdi, blocksize);
|
||||||
|
}
|
||||||
|
if (!ret && copy_to_user(arg, cgc->buffer, blocksize))
|
||||||
|
ret = -EFAULT;
|
||||||
|
out:
|
||||||
|
kfree(cgc->buffer);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_read_audio(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg)
|
||||||
|
{
|
||||||
|
struct cdrom_read_audio ra;
|
||||||
|
int lba;
|
||||||
|
|
||||||
|
IOCTL_IN(arg, struct cdrom_read_audio, ra);
|
||||||
|
|
||||||
|
if (ra.addr_format == CDROM_MSF)
|
||||||
|
lba = msf_to_lba(ra.addr.msf.minute,
|
||||||
|
ra.addr.msf.second,
|
||||||
|
ra.addr.msf.frame);
|
||||||
|
else if (ra.addr_format == CDROM_LBA)
|
||||||
|
lba = ra.addr.lba;
|
||||||
|
else
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
/* FIXME: we need upper bound checking, too!! */
|
||||||
|
if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_subchannel(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
struct cdrom_subchnl q;
|
||||||
|
u_char requested, back;
|
||||||
|
IOCTL_IN(arg, struct cdrom_subchnl, q);
|
||||||
|
requested = q.cdsc_format;
|
||||||
|
if (!((requested == CDROM_MSF) ||
|
||||||
|
(requested == CDROM_LBA)))
|
||||||
|
return -EINVAL;
|
||||||
|
q.cdsc_format = CDROM_MSF;
|
||||||
|
ret = cdrom_read_subchannel(cdi, &q, 0);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
back = q.cdsc_format; /* local copy */
|
||||||
|
sanitize_format(&q.cdsc_absaddr, &back, requested);
|
||||||
|
sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
|
||||||
|
IOCTL_OUT(arg, struct cdrom_subchnl, q);
|
||||||
|
/* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_play_msf(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg,
|
||||||
|
struct packet_command *cgc)
|
||||||
|
{
|
||||||
|
struct cdrom_device_ops *cdo = cdi->ops;
|
||||||
|
struct cdrom_msf msf;
|
||||||
|
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
|
||||||
|
IOCTL_IN(arg, struct cdrom_msf, msf);
|
||||||
|
cgc->cmd[0] = GPCMD_PLAY_AUDIO_MSF;
|
||||||
|
cgc->cmd[3] = msf.cdmsf_min0;
|
||||||
|
cgc->cmd[4] = msf.cdmsf_sec0;
|
||||||
|
cgc->cmd[5] = msf.cdmsf_frame0;
|
||||||
|
cgc->cmd[6] = msf.cdmsf_min1;
|
||||||
|
cgc->cmd[7] = msf.cdmsf_sec1;
|
||||||
|
cgc->cmd[8] = msf.cdmsf_frame1;
|
||||||
|
cgc->data_direction = CGC_DATA_NONE;
|
||||||
|
return cdo->generic_packet(cdi, cgc);
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_play_blk(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg,
|
||||||
|
struct packet_command *cgc)
|
||||||
|
{
|
||||||
|
struct cdrom_device_ops *cdo = cdi->ops;
|
||||||
|
struct cdrom_blk blk;
|
||||||
|
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
|
||||||
|
IOCTL_IN(arg, struct cdrom_blk, blk);
|
||||||
|
cgc->cmd[0] = GPCMD_PLAY_AUDIO_10;
|
||||||
|
cgc->cmd[2] = (blk.from >> 24) & 0xff;
|
||||||
|
cgc->cmd[3] = (blk.from >> 16) & 0xff;
|
||||||
|
cgc->cmd[4] = (blk.from >> 8) & 0xff;
|
||||||
|
cgc->cmd[5] = blk.from & 0xff;
|
||||||
|
cgc->cmd[7] = (blk.len >> 8) & 0xff;
|
||||||
|
cgc->cmd[8] = blk.len & 0xff;
|
||||||
|
cgc->data_direction = CGC_DATA_NONE;
|
||||||
|
return cdo->generic_packet(cdi, cgc);
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_volume(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg,
|
||||||
|
struct packet_command *cgc,
|
||||||
|
unsigned int cmd)
|
||||||
|
{
|
||||||
|
struct cdrom_volctrl volctrl;
|
||||||
|
unsigned char buffer[32];
|
||||||
|
char mask[sizeof(buffer)];
|
||||||
|
unsigned short offset;
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
|
||||||
|
|
||||||
|
IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
|
||||||
|
|
||||||
|
cgc->buffer = buffer;
|
||||||
|
cgc->buflen = 24;
|
||||||
|
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 0);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
/* originally the code depended on buffer[1] to determine
|
||||||
|
how much data is available for transfer. buffer[1] is
|
||||||
|
unfortunately ambigious and the only reliable way seem
|
||||||
|
to be to simply skip over the block descriptor... */
|
||||||
|
offset = 8 + be16_to_cpu(*(__be16 *)(buffer + 6));
|
||||||
|
|
||||||
|
if (offset + 16 > sizeof(buffer))
|
||||||
|
return -E2BIG;
|
||||||
|
|
||||||
|
if (offset + 16 > cgc->buflen) {
|
||||||
|
cgc->buflen = offset + 16;
|
||||||
|
ret = cdrom_mode_sense(cdi, cgc,
|
||||||
|
GPMODE_AUDIO_CTL_PAGE, 0);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* sanity check */
|
||||||
|
if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
|
||||||
|
buffer[offset + 1] < 14)
|
||||||
|
return -EINVAL;
|
||||||
|
|
||||||
|
/* now we have the current volume settings. if it was only
|
||||||
|
a CDROMVOLREAD, return these values */
|
||||||
|
if (cmd == CDROMVOLREAD) {
|
||||||
|
volctrl.channel0 = buffer[offset+9];
|
||||||
|
volctrl.channel1 = buffer[offset+11];
|
||||||
|
volctrl.channel2 = buffer[offset+13];
|
||||||
|
volctrl.channel3 = buffer[offset+15];
|
||||||
|
IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* get the volume mask */
|
||||||
|
cgc->buffer = mask;
|
||||||
|
ret = cdrom_mode_sense(cdi, cgc, GPMODE_AUDIO_CTL_PAGE, 1);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
|
||||||
|
buffer[offset + 9] = volctrl.channel0 & mask[offset + 9];
|
||||||
|
buffer[offset + 11] = volctrl.channel1 & mask[offset + 11];
|
||||||
|
buffer[offset + 13] = volctrl.channel2 & mask[offset + 13];
|
||||||
|
buffer[offset + 15] = volctrl.channel3 & mask[offset + 15];
|
||||||
|
|
||||||
|
/* set volume */
|
||||||
|
cgc->buffer = buffer + offset - 8;
|
||||||
|
memset(cgc->buffer, 0, 8);
|
||||||
|
return cdrom_mode_select(cdi, cgc);
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_start_stop(struct cdrom_device_info *cdi,
|
||||||
|
struct packet_command *cgc,
|
||||||
|
int cmd)
|
||||||
|
{
|
||||||
|
struct cdrom_device_ops *cdo = cdi->ops;
|
||||||
|
cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
|
||||||
|
cgc->cmd[0] = GPCMD_START_STOP_UNIT;
|
||||||
|
cgc->cmd[1] = 1;
|
||||||
|
cgc->cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
|
||||||
|
cgc->data_direction = CGC_DATA_NONE;
|
||||||
|
return cdo->generic_packet(cdi, cgc);
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_pause_resume(struct cdrom_device_info *cdi,
|
||||||
|
struct packet_command *cgc,
|
||||||
|
int cmd)
|
||||||
|
{
|
||||||
|
struct cdrom_device_ops *cdo = cdi->ops;
|
||||||
|
cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
|
||||||
|
cgc->cmd[0] = GPCMD_PAUSE_RESUME;
|
||||||
|
cgc->cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
|
||||||
|
cgc->data_direction = CGC_DATA_NONE;
|
||||||
|
return cdo->generic_packet(cdi, cgc);
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_dvd_read_struct(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg,
|
||||||
|
struct packet_command *cgc)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
dvd_struct *s;
|
||||||
|
int size = sizeof(dvd_struct);
|
||||||
|
|
||||||
|
if (!CDROM_CAN(CDC_DVD))
|
||||||
|
return -ENOSYS;
|
||||||
|
|
||||||
|
s = kmalloc(size, GFP_KERNEL);
|
||||||
|
if (!s)
|
||||||
|
return -ENOMEM;
|
||||||
|
|
||||||
|
cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
|
||||||
|
if (copy_from_user(s, arg, size)) {
|
||||||
|
kfree(s);
|
||||||
|
return -EFAULT;
|
||||||
|
}
|
||||||
|
|
||||||
|
ret = dvd_read_struct(cdi, s, cgc);
|
||||||
|
if (ret)
|
||||||
|
goto out;
|
||||||
|
|
||||||
|
if (copy_to_user(arg, s, size))
|
||||||
|
ret = -EFAULT;
|
||||||
|
out:
|
||||||
|
kfree(s);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_dvd_auth(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
dvd_authinfo ai;
|
||||||
|
if (!CDROM_CAN(CDC_DVD))
|
||||||
|
return -ENOSYS;
|
||||||
|
cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
|
||||||
|
IOCTL_IN(arg, dvd_authinfo, ai);
|
||||||
|
ret = dvd_do_auth(cdi, &ai);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
IOCTL_OUT(arg, dvd_authinfo, ai);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_next_writable(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
long next = 0;
|
||||||
|
cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
|
||||||
|
ret = cdrom_get_next_writable(cdi, &next);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
IOCTL_OUT(arg, long, next);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static noinline int mmc_ioctl_cdrom_last_written(struct cdrom_device_info *cdi,
|
||||||
|
void __user *arg)
|
||||||
|
{
|
||||||
|
int ret;
|
||||||
|
long last = 0;
|
||||||
|
cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
|
||||||
|
ret = cdrom_get_last_written(cdi, &last);
|
||||||
|
if (ret)
|
||||||
|
return ret;
|
||||||
|
IOCTL_OUT(arg, long, last);
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
|
static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
|
||||||
unsigned long arg)
|
unsigned long arg)
|
||||||
{
|
{
|
||||||
struct cdrom_device_ops *cdo = cdi->ops;
|
|
||||||
struct packet_command cgc;
|
struct packet_command cgc;
|
||||||
struct request_sense sense;
|
void __user *userptr = (void __user *)arg;
|
||||||
unsigned char buffer[32];
|
|
||||||
int ret = 0;
|
|
||||||
|
|
||||||
memset(&cgc, 0, sizeof(cgc));
|
memset(&cgc, 0, sizeof(cgc));
|
||||||
|
|
||||||
|
@ -2803,255 +3127,34 @@ static int mmc_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
|
||||||
switch (cmd) {
|
switch (cmd) {
|
||||||
case CDROMREADRAW:
|
case CDROMREADRAW:
|
||||||
case CDROMREADMODE1:
|
case CDROMREADMODE1:
|
||||||
case CDROMREADMODE2: {
|
case CDROMREADMODE2:
|
||||||
struct cdrom_msf msf;
|
return mmc_ioctl_cdrom_read_data(cdi, userptr, &cgc, cmd);
|
||||||
int blocksize = 0, format = 0, lba;
|
case CDROMREADAUDIO:
|
||||||
|
return mmc_ioctl_cdrom_read_audio(cdi, userptr);
|
||||||
switch (cmd) {
|
case CDROMSUBCHNL:
|
||||||
case CDROMREADRAW:
|
return mmc_ioctl_cdrom_subchannel(cdi, userptr);
|
||||||
blocksize = CD_FRAMESIZE_RAW;
|
case CDROMPLAYMSF:
|
||||||
break;
|
return mmc_ioctl_cdrom_play_msf(cdi, userptr, &cgc);
|
||||||
case CDROMREADMODE1:
|
case CDROMPLAYBLK:
|
||||||
blocksize = CD_FRAMESIZE;
|
return mmc_ioctl_cdrom_play_blk(cdi, userptr, &cgc);
|
||||||
format = 2;
|
|
||||||
break;
|
|
||||||
case CDROMREADMODE2:
|
|
||||||
blocksize = CD_FRAMESIZE_RAW0;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
IOCTL_IN(arg, struct cdrom_msf, msf);
|
|
||||||
lba = msf_to_lba(msf.cdmsf_min0,msf.cdmsf_sec0,msf.cdmsf_frame0);
|
|
||||||
/* FIXME: we need upper bound checking, too!! */
|
|
||||||
if (lba < 0)
|
|
||||||
return -EINVAL;
|
|
||||||
cgc.buffer = kmalloc(blocksize, GFP_KERNEL);
|
|
||||||
if (cgc.buffer == NULL)
|
|
||||||
return -ENOMEM;
|
|
||||||
memset(&sense, 0, sizeof(sense));
|
|
||||||
cgc.sense = &sense;
|
|
||||||
cgc.data_direction = CGC_DATA_READ;
|
|
||||||
ret = cdrom_read_block(cdi, &cgc, lba, 1, format, blocksize);
|
|
||||||
if (ret && sense.sense_key==0x05 && sense.asc==0x20 && sense.ascq==0x00) {
|
|
||||||
/*
|
|
||||||
* SCSI-II devices are not required to support
|
|
||||||
* READ_CD, so let's try switching block size
|
|
||||||
*/
|
|
||||||
/* FIXME: switch back again... */
|
|
||||||
if ((ret = cdrom_switch_blocksize(cdi, blocksize))) {
|
|
||||||
kfree(cgc.buffer);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
cgc.sense = NULL;
|
|
||||||
ret = cdrom_read_cd(cdi, &cgc, lba, blocksize, 1);
|
|
||||||
ret |= cdrom_switch_blocksize(cdi, blocksize);
|
|
||||||
}
|
|
||||||
if (!ret && copy_to_user((char __user *)arg, cgc.buffer, blocksize))
|
|
||||||
ret = -EFAULT;
|
|
||||||
kfree(cgc.buffer);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
case CDROMREADAUDIO: {
|
|
||||||
struct cdrom_read_audio ra;
|
|
||||||
int lba;
|
|
||||||
|
|
||||||
IOCTL_IN(arg, struct cdrom_read_audio, ra);
|
|
||||||
|
|
||||||
if (ra.addr_format == CDROM_MSF)
|
|
||||||
lba = msf_to_lba(ra.addr.msf.minute,
|
|
||||||
ra.addr.msf.second,
|
|
||||||
ra.addr.msf.frame);
|
|
||||||
else if (ra.addr_format == CDROM_LBA)
|
|
||||||
lba = ra.addr.lba;
|
|
||||||
else
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
/* FIXME: we need upper bound checking, too!! */
|
|
||||||
if (lba < 0 || ra.nframes <= 0 || ra.nframes > CD_FRAMES)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
return cdrom_read_cdda(cdi, ra.buf, lba, ra.nframes);
|
|
||||||
}
|
|
||||||
case CDROMSUBCHNL: {
|
|
||||||
struct cdrom_subchnl q;
|
|
||||||
u_char requested, back;
|
|
||||||
IOCTL_IN(arg, struct cdrom_subchnl, q);
|
|
||||||
requested = q.cdsc_format;
|
|
||||||
if (!((requested == CDROM_MSF) ||
|
|
||||||
(requested == CDROM_LBA)))
|
|
||||||
return -EINVAL;
|
|
||||||
q.cdsc_format = CDROM_MSF;
|
|
||||||
if ((ret = cdrom_read_subchannel(cdi, &q, 0)))
|
|
||||||
return ret;
|
|
||||||
back = q.cdsc_format; /* local copy */
|
|
||||||
sanitize_format(&q.cdsc_absaddr, &back, requested);
|
|
||||||
sanitize_format(&q.cdsc_reladdr, &q.cdsc_format, requested);
|
|
||||||
IOCTL_OUT(arg, struct cdrom_subchnl, q);
|
|
||||||
/* cdinfo(CD_DO_IOCTL, "CDROMSUBCHNL successful\n"); */
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
case CDROMPLAYMSF: {
|
|
||||||
struct cdrom_msf msf;
|
|
||||||
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYMSF\n");
|
|
||||||
IOCTL_IN(arg, struct cdrom_msf, msf);
|
|
||||||
cgc.cmd[0] = GPCMD_PLAY_AUDIO_MSF;
|
|
||||||
cgc.cmd[3] = msf.cdmsf_min0;
|
|
||||||
cgc.cmd[4] = msf.cdmsf_sec0;
|
|
||||||
cgc.cmd[5] = msf.cdmsf_frame0;
|
|
||||||
cgc.cmd[6] = msf.cdmsf_min1;
|
|
||||||
cgc.cmd[7] = msf.cdmsf_sec1;
|
|
||||||
cgc.cmd[8] = msf.cdmsf_frame1;
|
|
||||||
cgc.data_direction = CGC_DATA_NONE;
|
|
||||||
return cdo->generic_packet(cdi, &cgc);
|
|
||||||
}
|
|
||||||
case CDROMPLAYBLK: {
|
|
||||||
struct cdrom_blk blk;
|
|
||||||
cdinfo(CD_DO_IOCTL, "entering CDROMPLAYBLK\n");
|
|
||||||
IOCTL_IN(arg, struct cdrom_blk, blk);
|
|
||||||
cgc.cmd[0] = GPCMD_PLAY_AUDIO_10;
|
|
||||||
cgc.cmd[2] = (blk.from >> 24) & 0xff;
|
|
||||||
cgc.cmd[3] = (blk.from >> 16) & 0xff;
|
|
||||||
cgc.cmd[4] = (blk.from >> 8) & 0xff;
|
|
||||||
cgc.cmd[5] = blk.from & 0xff;
|
|
||||||
cgc.cmd[7] = (blk.len >> 8) & 0xff;
|
|
||||||
cgc.cmd[8] = blk.len & 0xff;
|
|
||||||
cgc.data_direction = CGC_DATA_NONE;
|
|
||||||
return cdo->generic_packet(cdi, &cgc);
|
|
||||||
}
|
|
||||||
case CDROMVOLCTRL:
|
case CDROMVOLCTRL:
|
||||||
case CDROMVOLREAD: {
|
case CDROMVOLREAD:
|
||||||
struct cdrom_volctrl volctrl;
|
return mmc_ioctl_cdrom_volume(cdi, userptr, &cgc, cmd);
|
||||||
char mask[sizeof(buffer)];
|
|
||||||
unsigned short offset;
|
|
||||||
|
|
||||||
cdinfo(CD_DO_IOCTL, "entering CDROMVOLUME\n");
|
|
||||||
|
|
||||||
IOCTL_IN(arg, struct cdrom_volctrl, volctrl);
|
|
||||||
|
|
||||||
cgc.buffer = buffer;
|
|
||||||
cgc.buflen = 24;
|
|
||||||
if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_AUDIO_CTL_PAGE, 0)))
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
/* originally the code depended on buffer[1] to determine
|
|
||||||
how much data is available for transfer. buffer[1] is
|
|
||||||
unfortunately ambigious and the only reliable way seem
|
|
||||||
to be to simply skip over the block descriptor... */
|
|
||||||
offset = 8 + be16_to_cpu(*(__be16 *)(buffer+6));
|
|
||||||
|
|
||||||
if (offset + 16 > sizeof(buffer))
|
|
||||||
return -E2BIG;
|
|
||||||
|
|
||||||
if (offset + 16 > cgc.buflen) {
|
|
||||||
cgc.buflen = offset+16;
|
|
||||||
ret = cdrom_mode_sense(cdi, &cgc,
|
|
||||||
GPMODE_AUDIO_CTL_PAGE, 0);
|
|
||||||
if (ret)
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* sanity check */
|
|
||||||
if ((buffer[offset] & 0x3f) != GPMODE_AUDIO_CTL_PAGE ||
|
|
||||||
buffer[offset+1] < 14)
|
|
||||||
return -EINVAL;
|
|
||||||
|
|
||||||
/* now we have the current volume settings. if it was only
|
|
||||||
a CDROMVOLREAD, return these values */
|
|
||||||
if (cmd == CDROMVOLREAD) {
|
|
||||||
volctrl.channel0 = buffer[offset+9];
|
|
||||||
volctrl.channel1 = buffer[offset+11];
|
|
||||||
volctrl.channel2 = buffer[offset+13];
|
|
||||||
volctrl.channel3 = buffer[offset+15];
|
|
||||||
IOCTL_OUT(arg, struct cdrom_volctrl, volctrl);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* get the volume mask */
|
|
||||||
cgc.buffer = mask;
|
|
||||||
if ((ret = cdrom_mode_sense(cdi, &cgc,
|
|
||||||
GPMODE_AUDIO_CTL_PAGE, 1)))
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
buffer[offset+9] = volctrl.channel0 & mask[offset+9];
|
|
||||||
buffer[offset+11] = volctrl.channel1 & mask[offset+11];
|
|
||||||
buffer[offset+13] = volctrl.channel2 & mask[offset+13];
|
|
||||||
buffer[offset+15] = volctrl.channel3 & mask[offset+15];
|
|
||||||
|
|
||||||
/* set volume */
|
|
||||||
cgc.buffer = buffer + offset - 8;
|
|
||||||
memset(cgc.buffer, 0, 8);
|
|
||||||
return cdrom_mode_select(cdi, &cgc);
|
|
||||||
}
|
|
||||||
|
|
||||||
case CDROMSTART:
|
case CDROMSTART:
|
||||||
case CDROMSTOP: {
|
case CDROMSTOP:
|
||||||
cdinfo(CD_DO_IOCTL, "entering CDROMSTART/CDROMSTOP\n");
|
return mmc_ioctl_cdrom_start_stop(cdi, &cgc, cmd);
|
||||||
cgc.cmd[0] = GPCMD_START_STOP_UNIT;
|
|
||||||
cgc.cmd[1] = 1;
|
|
||||||
cgc.cmd[4] = (cmd == CDROMSTART) ? 1 : 0;
|
|
||||||
cgc.data_direction = CGC_DATA_NONE;
|
|
||||||
return cdo->generic_packet(cdi, &cgc);
|
|
||||||
}
|
|
||||||
|
|
||||||
case CDROMPAUSE:
|
case CDROMPAUSE:
|
||||||
case CDROMRESUME: {
|
case CDROMRESUME:
|
||||||
cdinfo(CD_DO_IOCTL, "entering CDROMPAUSE/CDROMRESUME\n");
|
return mmc_ioctl_cdrom_pause_resume(cdi, &cgc, cmd);
|
||||||
cgc.cmd[0] = GPCMD_PAUSE_RESUME;
|
case DVD_READ_STRUCT:
|
||||||
cgc.cmd[8] = (cmd == CDROMRESUME) ? 1 : 0;
|
return mmc_ioctl_dvd_read_struct(cdi, userptr, &cgc);
|
||||||
cgc.data_direction = CGC_DATA_NONE;
|
case DVD_AUTH:
|
||||||
return cdo->generic_packet(cdi, &cgc);
|
return mmc_ioctl_dvd_auth(cdi, userptr);
|
||||||
}
|
case CDROM_NEXT_WRITABLE:
|
||||||
|
return mmc_ioctl_cdrom_next_writable(cdi, userptr);
|
||||||
case DVD_READ_STRUCT: {
|
case CDROM_LAST_WRITTEN:
|
||||||
dvd_struct *s;
|
return mmc_ioctl_cdrom_last_written(cdi, userptr);
|
||||||
int size = sizeof(dvd_struct);
|
}
|
||||||
if (!CDROM_CAN(CDC_DVD))
|
|
||||||
return -ENOSYS;
|
|
||||||
if ((s = kmalloc(size, GFP_KERNEL)) == NULL)
|
|
||||||
return -ENOMEM;
|
|
||||||
cdinfo(CD_DO_IOCTL, "entering DVD_READ_STRUCT\n");
|
|
||||||
if (copy_from_user(s, (dvd_struct __user *)arg, size)) {
|
|
||||||
kfree(s);
|
|
||||||
return -EFAULT;
|
|
||||||
}
|
|
||||||
if ((ret = dvd_read_struct(cdi, s))) {
|
|
||||||
kfree(s);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
if (copy_to_user((dvd_struct __user *)arg, s, size))
|
|
||||||
ret = -EFAULT;
|
|
||||||
kfree(s);
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
case DVD_AUTH: {
|
|
||||||
dvd_authinfo ai;
|
|
||||||
if (!CDROM_CAN(CDC_DVD))
|
|
||||||
return -ENOSYS;
|
|
||||||
cdinfo(CD_DO_IOCTL, "entering DVD_AUTH\n");
|
|
||||||
IOCTL_IN(arg, dvd_authinfo, ai);
|
|
||||||
if ((ret = dvd_do_auth (cdi, &ai)))
|
|
||||||
return ret;
|
|
||||||
IOCTL_OUT(arg, dvd_authinfo, ai);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
case CDROM_NEXT_WRITABLE: {
|
|
||||||
long next = 0;
|
|
||||||
cdinfo(CD_DO_IOCTL, "entering CDROM_NEXT_WRITABLE\n");
|
|
||||||
if ((ret = cdrom_get_next_writable(cdi, &next)))
|
|
||||||
return ret;
|
|
||||||
IOCTL_OUT(arg, long, next);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
case CDROM_LAST_WRITTEN: {
|
|
||||||
long last = 0;
|
|
||||||
cdinfo(CD_DO_IOCTL, "entering CDROM_LAST_WRITTEN\n");
|
|
||||||
if ((ret = cdrom_get_last_written(cdi, &last)))
|
|
||||||
return ret;
|
|
||||||
IOCTL_OUT(arg, long, last);
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
} /* switch */
|
|
||||||
|
|
||||||
return -ENOTTY;
|
return -ENOTTY;
|
||||||
}
|
}
|
||||||
|
|
|
@ -1060,7 +1060,7 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
|
||||||
goto bad_page_pool;
|
goto bad_page_pool;
|
||||||
}
|
}
|
||||||
|
|
||||||
cc->bs = bioset_create(MIN_IOS, MIN_IOS);
|
cc->bs = bioset_create(MIN_IOS, 0);
|
||||||
if (!cc->bs) {
|
if (!cc->bs) {
|
||||||
ti->error = "Cannot allocate crypt bioset";
|
ti->error = "Cannot allocate crypt bioset";
|
||||||
goto bad_bs;
|
goto bad_bs;
|
||||||
|
|
|
@ -56,7 +56,7 @@ struct dm_io_client *dm_io_client_create(unsigned num_pages)
|
||||||
if (!client->pool)
|
if (!client->pool)
|
||||||
goto bad;
|
goto bad;
|
||||||
|
|
||||||
client->bios = bioset_create(16, 16);
|
client->bios = bioset_create(16, 0);
|
||||||
if (!client->bios)
|
if (!client->bios)
|
||||||
goto bad;
|
goto bad;
|
||||||
|
|
||||||
|
|
|
@ -1093,7 +1093,7 @@ static struct mapped_device *alloc_dev(int minor)
|
||||||
if (!md->tio_pool)
|
if (!md->tio_pool)
|
||||||
goto bad_tio_pool;
|
goto bad_tio_pool;
|
||||||
|
|
||||||
md->bs = bioset_create(16, 16);
|
md->bs = bioset_create(16, 0);
|
||||||
if (!md->bs)
|
if (!md->bs)
|
||||||
goto bad_no_bioset;
|
goto bad_no_bioset;
|
||||||
|
|
||||||
|
|
108
fs/aio.c
108
fs/aio.c
|
@ -191,23 +191,11 @@ static int aio_setup_ring(struct kioctx *ctx)
|
||||||
kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
|
kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
|
||||||
} while(0)
|
} while(0)
|
||||||
|
|
||||||
|
static void ctx_rcu_free(struct rcu_head *head)
|
||||||
/* __put_ioctx
|
|
||||||
* Called when the last user of an aio context has gone away,
|
|
||||||
* and the struct needs to be freed.
|
|
||||||
*/
|
|
||||||
static void __put_ioctx(struct kioctx *ctx)
|
|
||||||
{
|
{
|
||||||
|
struct kioctx *ctx = container_of(head, struct kioctx, rcu_head);
|
||||||
unsigned nr_events = ctx->max_reqs;
|
unsigned nr_events = ctx->max_reqs;
|
||||||
|
|
||||||
BUG_ON(ctx->reqs_active);
|
|
||||||
|
|
||||||
cancel_delayed_work(&ctx->wq);
|
|
||||||
cancel_work_sync(&ctx->wq.work);
|
|
||||||
aio_free_ring(ctx);
|
|
||||||
mmdrop(ctx->mm);
|
|
||||||
ctx->mm = NULL;
|
|
||||||
pr_debug("__put_ioctx: freeing %p\n", ctx);
|
|
||||||
kmem_cache_free(kioctx_cachep, ctx);
|
kmem_cache_free(kioctx_cachep, ctx);
|
||||||
|
|
||||||
if (nr_events) {
|
if (nr_events) {
|
||||||
|
@ -218,6 +206,23 @@ static void __put_ioctx(struct kioctx *ctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/* __put_ioctx
|
||||||
|
* Called when the last user of an aio context has gone away,
|
||||||
|
* and the struct needs to be freed.
|
||||||
|
*/
|
||||||
|
static void __put_ioctx(struct kioctx *ctx)
|
||||||
|
{
|
||||||
|
BUG_ON(ctx->reqs_active);
|
||||||
|
|
||||||
|
cancel_delayed_work(&ctx->wq);
|
||||||
|
cancel_work_sync(&ctx->wq.work);
|
||||||
|
aio_free_ring(ctx);
|
||||||
|
mmdrop(ctx->mm);
|
||||||
|
ctx->mm = NULL;
|
||||||
|
pr_debug("__put_ioctx: freeing %p\n", ctx);
|
||||||
|
call_rcu(&ctx->rcu_head, ctx_rcu_free);
|
||||||
|
}
|
||||||
|
|
||||||
#define get_ioctx(kioctx) do { \
|
#define get_ioctx(kioctx) do { \
|
||||||
BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
|
BUG_ON(atomic_read(&(kioctx)->users) <= 0); \
|
||||||
atomic_inc(&(kioctx)->users); \
|
atomic_inc(&(kioctx)->users); \
|
||||||
|
@ -235,6 +240,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
|
||||||
{
|
{
|
||||||
struct mm_struct *mm;
|
struct mm_struct *mm;
|
||||||
struct kioctx *ctx;
|
struct kioctx *ctx;
|
||||||
|
int did_sync = 0;
|
||||||
|
|
||||||
/* Prevent overflows */
|
/* Prevent overflows */
|
||||||
if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
|
if ((nr_events > (0x10000000U / sizeof(struct io_event))) ||
|
||||||
|
@ -267,21 +273,30 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
|
||||||
goto out_freectx;
|
goto out_freectx;
|
||||||
|
|
||||||
/* limit the number of system wide aios */
|
/* limit the number of system wide aios */
|
||||||
spin_lock(&aio_nr_lock);
|
do {
|
||||||
if (aio_nr + ctx->max_reqs > aio_max_nr ||
|
spin_lock_bh(&aio_nr_lock);
|
||||||
aio_nr + ctx->max_reqs < aio_nr)
|
if (aio_nr + nr_events > aio_max_nr ||
|
||||||
ctx->max_reqs = 0;
|
aio_nr + nr_events < aio_nr)
|
||||||
else
|
ctx->max_reqs = 0;
|
||||||
aio_nr += ctx->max_reqs;
|
else
|
||||||
spin_unlock(&aio_nr_lock);
|
aio_nr += ctx->max_reqs;
|
||||||
|
spin_unlock_bh(&aio_nr_lock);
|
||||||
|
if (ctx->max_reqs || did_sync)
|
||||||
|
break;
|
||||||
|
|
||||||
|
/* wait for rcu callbacks to have completed before giving up */
|
||||||
|
synchronize_rcu();
|
||||||
|
did_sync = 1;
|
||||||
|
ctx->max_reqs = nr_events;
|
||||||
|
} while (1);
|
||||||
|
|
||||||
if (ctx->max_reqs == 0)
|
if (ctx->max_reqs == 0)
|
||||||
goto out_cleanup;
|
goto out_cleanup;
|
||||||
|
|
||||||
/* now link into global list. */
|
/* now link into global list. */
|
||||||
write_lock(&mm->ioctx_list_lock);
|
spin_lock(&mm->ioctx_lock);
|
||||||
ctx->next = mm->ioctx_list;
|
hlist_add_head_rcu(&ctx->list, &mm->ioctx_list);
|
||||||
mm->ioctx_list = ctx;
|
spin_unlock(&mm->ioctx_lock);
|
||||||
write_unlock(&mm->ioctx_list_lock);
|
|
||||||
|
|
||||||
dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
|
dprintk("aio: allocated ioctx %p[%ld]: mm=%p mask=0x%x\n",
|
||||||
ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
|
ctx, ctx->user_id, current->mm, ctx->ring_info.nr);
|
||||||
|
@ -375,11 +390,12 @@ ssize_t wait_on_sync_kiocb(struct kiocb *iocb)
|
||||||
*/
|
*/
|
||||||
void exit_aio(struct mm_struct *mm)
|
void exit_aio(struct mm_struct *mm)
|
||||||
{
|
{
|
||||||
struct kioctx *ctx = mm->ioctx_list;
|
struct kioctx *ctx;
|
||||||
mm->ioctx_list = NULL;
|
|
||||||
while (ctx) {
|
while (!hlist_empty(&mm->ioctx_list)) {
|
||||||
struct kioctx *next = ctx->next;
|
ctx = hlist_entry(mm->ioctx_list.first, struct kioctx, list);
|
||||||
ctx->next = NULL;
|
hlist_del_rcu(&ctx->list);
|
||||||
|
|
||||||
aio_cancel_all(ctx);
|
aio_cancel_all(ctx);
|
||||||
|
|
||||||
wait_for_all_aios(ctx);
|
wait_for_all_aios(ctx);
|
||||||
|
@ -394,7 +410,6 @@ void exit_aio(struct mm_struct *mm)
|
||||||
atomic_read(&ctx->users), ctx->dead,
|
atomic_read(&ctx->users), ctx->dead,
|
||||||
ctx->reqs_active);
|
ctx->reqs_active);
|
||||||
put_ioctx(ctx);
|
put_ioctx(ctx);
|
||||||
ctx = next;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -555,19 +570,21 @@ int aio_put_req(struct kiocb *req)
|
||||||
|
|
||||||
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
|
static struct kioctx *lookup_ioctx(unsigned long ctx_id)
|
||||||
{
|
{
|
||||||
struct kioctx *ioctx;
|
struct mm_struct *mm = current->mm;
|
||||||
struct mm_struct *mm;
|
struct kioctx *ctx = NULL;
|
||||||
|
struct hlist_node *n;
|
||||||
|
|
||||||
mm = current->mm;
|
rcu_read_lock();
|
||||||
read_lock(&mm->ioctx_list_lock);
|
|
||||||
for (ioctx = mm->ioctx_list; ioctx; ioctx = ioctx->next)
|
hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) {
|
||||||
if (likely(ioctx->user_id == ctx_id && !ioctx->dead)) {
|
if (ctx->user_id == ctx_id && !ctx->dead) {
|
||||||
get_ioctx(ioctx);
|
get_ioctx(ctx);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
read_unlock(&mm->ioctx_list_lock);
|
}
|
||||||
|
|
||||||
return ioctx;
|
rcu_read_unlock();
|
||||||
|
return ctx;
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -1215,19 +1232,14 @@ static int read_events(struct kioctx *ctx,
|
||||||
static void io_destroy(struct kioctx *ioctx)
|
static void io_destroy(struct kioctx *ioctx)
|
||||||
{
|
{
|
||||||
struct mm_struct *mm = current->mm;
|
struct mm_struct *mm = current->mm;
|
||||||
struct kioctx **tmp;
|
|
||||||
int was_dead;
|
int was_dead;
|
||||||
|
|
||||||
/* delete the entry from the list is someone else hasn't already */
|
/* delete the entry from the list is someone else hasn't already */
|
||||||
write_lock(&mm->ioctx_list_lock);
|
spin_lock(&mm->ioctx_lock);
|
||||||
was_dead = ioctx->dead;
|
was_dead = ioctx->dead;
|
||||||
ioctx->dead = 1;
|
ioctx->dead = 1;
|
||||||
for (tmp = &mm->ioctx_list; *tmp && *tmp != ioctx;
|
hlist_del_rcu(&ioctx->list);
|
||||||
tmp = &(*tmp)->next)
|
spin_unlock(&mm->ioctx_lock);
|
||||||
;
|
|
||||||
if (*tmp)
|
|
||||||
*tmp = ioctx->next;
|
|
||||||
write_unlock(&mm->ioctx_list_lock);
|
|
||||||
|
|
||||||
dprintk("aio_release(%p)\n", ioctx);
|
dprintk("aio_release(%p)\n", ioctx);
|
||||||
if (likely(!was_dead))
|
if (likely(!was_dead))
|
||||||
|
|
|
@@ -111,7 +111,7 @@ void bio_integrity_free(struct bio *bio, struct bio_set *bs)
 	    && bip->bip_buf != NULL)
 		kfree(bip->bip_buf);

-	mempool_free(bip->bip_vec, bs->bvec_pools[bip->bip_pool]);
+	bvec_free_bs(bs, bip->bip_vec, bip->bip_pool);
 	mempool_free(bip, bs->bio_integrity_pool);

 	bio->bi_integrity = NULL;

324	fs/bio.c
@@ -31,7 +31,11 @@

 DEFINE_TRACE(block_split);

-static struct kmem_cache *bio_slab __read_mostly;
+/*
+ * Test patch to inline a certain number of bi_io_vec's inside the bio
+ * itself, to shrink a bio data allocation from two mempool calls to one
+ */
+#define BIO_INLINE_VECS		4

 static mempool_t *bio_split_pool __read_mostly;

@@ -40,9 +44,8 @@ static mempool_t *bio_split_pool __read_mostly;
  * break badly! cannot be bigger than what you can fit into an
  * unsigned short
  */
-
 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
-static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
+struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
 	BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
 };
 #undef BV
@@ -53,12 +56,121 @@ static struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly = {
  */
 struct bio_set *fs_bio_set;

+/*
+ * Our slab pool management
+ */
+struct bio_slab {
+	struct kmem_cache *slab;
+	unsigned int slab_ref;
+	unsigned int slab_size;
+	char name[8];
+};
+static DEFINE_MUTEX(bio_slab_lock);
+static struct bio_slab *bio_slabs;
+static unsigned int bio_slab_nr, bio_slab_max;
+
+static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
+{
+	unsigned int sz = sizeof(struct bio) + extra_size;
+	struct kmem_cache *slab = NULL;
+	struct bio_slab *bslab;
+	unsigned int i, entry = -1;
+
+	mutex_lock(&bio_slab_lock);
+
+	i = 0;
+	while (i < bio_slab_nr) {
+		struct bio_slab *bslab = &bio_slabs[i];
+
+		if (!bslab->slab && entry == -1)
+			entry = i;
+		else if (bslab->slab_size == sz) {
+			slab = bslab->slab;
+			bslab->slab_ref++;
+			break;
+		}
+		i++;
+	}
+
+	if (slab)
+		goto out_unlock;
+
+	if (bio_slab_nr == bio_slab_max && entry == -1) {
+		bio_slab_max <<= 1;
+		bio_slabs = krealloc(bio_slabs,
+				     bio_slab_max * sizeof(struct bio_slab),
+				     GFP_KERNEL);
+		if (!bio_slabs)
+			goto out_unlock;
+	}
+	if (entry == -1)
+		entry = bio_slab_nr++;
+
+	bslab = &bio_slabs[entry];
+
+	snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
+	slab = kmem_cache_create(bslab->name, sz, 0, SLAB_HWCACHE_ALIGN, NULL);
+	if (!slab)
+		goto out_unlock;
+
+	printk("bio: create slab <%s> at %d\n", bslab->name, entry);
+	bslab->slab = slab;
+	bslab->slab_ref = 1;
+	bslab->slab_size = sz;
+out_unlock:
+	mutex_unlock(&bio_slab_lock);
+	return slab;
+}
+
+static void bio_put_slab(struct bio_set *bs)
+{
+	struct bio_slab *bslab = NULL;
+	unsigned int i;
+
+	mutex_lock(&bio_slab_lock);
+
+	for (i = 0; i < bio_slab_nr; i++) {
+		if (bs->bio_slab == bio_slabs[i].slab) {
+			bslab = &bio_slabs[i];
+			break;
+		}
+	}
+
+	if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
+		goto out;
+
+	WARN_ON(!bslab->slab_ref);
+
+	if (--bslab->slab_ref)
+		goto out;
+
+	kmem_cache_destroy(bslab->slab);
+	bslab->slab = NULL;
+out:
+	mutex_unlock(&bio_slab_lock);
+}
+
 unsigned int bvec_nr_vecs(unsigned short idx)
 {
 	return bvec_slabs[idx].nr_vecs;
 }

-struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct bio_set *bs)
+void bvec_free_bs(struct bio_set *bs, struct bio_vec *bv, unsigned int idx)
+{
+	BIO_BUG_ON(idx >= BIOVEC_NR_POOLS);
+
+	if (idx == BIOVEC_MAX_IDX)
+		mempool_free(bv, bs->bvec_pool);
+	else {
+		struct biovec_slab *bvs = bvec_slabs + idx;
+
+		kmem_cache_free(bvs->slab, bv);
+	}
+}
+
+struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx,
+			      struct bio_set *bs)
 {
 	struct bio_vec *bvl;

@@ -67,60 +179,85 @@ struct bio_vec *bvec_alloc_bs(gfp_t gfp_mask, int nr, unsigned long *idx, struct
 	 * If not, this is a bio_kmalloc() allocation and just do a
 	 * kzalloc() for the exact number of vecs right away.
 	 */
-	if (bs) {
-		/*
-		 * see comment near bvec_array define!
-		 */
-		switch (nr) {
-		case 1:
-			*idx = 0;
-			break;
-		case 2 ... 4:
-			*idx = 1;
-			break;
-		case 5 ... 16:
-			*idx = 2;
-			break;
-		case 17 ... 64:
-			*idx = 3;
-			break;
-		case 65 ... 128:
-			*idx = 4;
-			break;
-		case 129 ... BIO_MAX_PAGES:
-			*idx = 5;
-			break;
-		default:
-			return NULL;
-		}
-
-		/*
-		 * idx now points to the pool we want to allocate from
-		 */
-		bvl = mempool_alloc(bs->bvec_pools[*idx], gfp_mask);
-		if (bvl)
-			memset(bvl, 0,
-				bvec_nr_vecs(*idx) * sizeof(struct bio_vec));
-	} else
-		bvl = kzalloc(nr * sizeof(struct bio_vec), gfp_mask);
+	if (!bs)
+		bvl = kmalloc(nr * sizeof(struct bio_vec), gfp_mask);
+
+	/*
+	 * see comment near bvec_array define!
+	 */
+	switch (nr) {
+	case 1:
+		*idx = 0;
+		break;
+	case 2 ... 4:
+		*idx = 1;
+		break;
+	case 5 ... 16:
+		*idx = 2;
+		break;
+	case 17 ... 64:
+		*idx = 3;
+		break;
+	case 65 ... 128:
+		*idx = 4;
+		break;
+	case 129 ... BIO_MAX_PAGES:
+		*idx = 5;
+		break;
+	default:
+		return NULL;
+	}
+
+	/*
+	 * idx now points to the pool we want to allocate from. only the
+	 * 1-vec entry pool is mempool backed.
+	 */
+	if (*idx == BIOVEC_MAX_IDX) {
+fallback:
+		bvl = mempool_alloc(bs->bvec_pool, gfp_mask);
+	} else {
+		struct biovec_slab *bvs = bvec_slabs + *idx;
+		gfp_t __gfp_mask = gfp_mask & ~(__GFP_WAIT | __GFP_IO);
+
+		/*
+		 * Make this allocation restricted and don't dump info on
+		 * allocation failures, since we'll fallback to the mempool
+		 * in case of failure.
+		 */
+		__gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
+
+		/*
+		 * Try a slab allocation. If this fails and __GFP_WAIT
+		 * is set, retry with the 1-entry mempool
+		 */
+		bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
+		if (unlikely(!bvl && (gfp_mask & __GFP_WAIT))) {
+			*idx = BIOVEC_MAX_IDX;
+			goto fallback;
+		}
+	}

 	return bvl;
 }

-void bio_free(struct bio *bio, struct bio_set *bio_set)
+void bio_free(struct bio *bio, struct bio_set *bs)
 {
-	if (bio->bi_io_vec) {
-		const int pool_idx = BIO_POOL_IDX(bio);
+	void *p;

-		BIO_BUG_ON(pool_idx >= BIOVEC_NR_POOLS);
-
-		mempool_free(bio->bi_io_vec, bio_set->bvec_pools[pool_idx]);
-	}
+	if (bio_has_allocated_vec(bio))
+		bvec_free_bs(bs, bio->bi_io_vec, BIO_POOL_IDX(bio));

 	if (bio_integrity(bio))
-		bio_integrity_free(bio, bio_set);
+		bio_integrity_free(bio, bs);

-	mempool_free(bio, bio_set->bio_pool);
+	/*
+	 * If we have front padding, adjust the bio pointer before freeing
+	 */
+	p = bio;
+	if (bs->front_pad)
+		p -= bs->front_pad;
+
+	mempool_free(p, bs->bio_pool);
 }

 /*
@@ -133,7 +270,8 @@ static void bio_fs_destructor(struct bio *bio)

 static void bio_kmalloc_destructor(struct bio *bio)
 {
-	kfree(bio->bi_io_vec);
+	if (bio_has_allocated_vec(bio))
+		kfree(bio->bi_io_vec);
 	kfree(bio);
 }

@@ -157,16 +295,20 @@ void bio_init(struct bio *bio)
  * for a &struct bio to become free. If a %NULL @bs is passed in, we will
  * fall back to just using @kmalloc to allocate the required memory.
  *
- * allocate bio and iovecs from the memory pools specified by the
- * bio_set structure, or @kmalloc if none given.
+ * Note that the caller must set ->bi_destructor on succesful return
+ * of a bio, to do the appropriate freeing of the bio once the reference
+ * count drops to zero.
  **/
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
-	struct bio *bio;
+	struct bio *bio = NULL;

-	if (bs)
-		bio = mempool_alloc(bs->bio_pool, gfp_mask);
-	else
+	if (bs) {
+		void *p = mempool_alloc(bs->bio_pool, gfp_mask);
+
+		if (p)
+			bio = p + bs->front_pad;
+	} else
 		bio = kmalloc(sizeof(*bio), gfp_mask);

 	if (likely(bio)) {
@@ -176,7 +318,15 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		if (likely(nr_iovecs)) {
 			unsigned long uninitialized_var(idx);

-			bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx, bs);
+			if (nr_iovecs <= BIO_INLINE_VECS) {
+				idx = 0;
+				bvl = bio->bi_inline_vecs;
+				nr_iovecs = BIO_INLINE_VECS;
+			} else {
+				bvl = bvec_alloc_bs(gfp_mask, nr_iovecs, &idx,
+							bs);
+				nr_iovecs = bvec_nr_vecs(idx);
+			}
 			if (unlikely(!bvl)) {
 				if (bs)
 					mempool_free(bio, bs->bio_pool);
@@ -186,7 +336,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 				goto out;
 			}
 			bio->bi_flags |= idx << BIO_POOL_OFFSET;
-			bio->bi_max_vecs = bvec_nr_vecs(idx);
+			bio->bi_max_vecs = nr_iovecs;
 		}
 		bio->bi_io_vec = bvl;
 	}
@@ -1346,30 +1496,18 @@ EXPORT_SYMBOL(bio_sector_offset);
  */
 static int biovec_create_pools(struct bio_set *bs, int pool_entries)
 {
-	int i;
+	struct biovec_slab *bp = bvec_slabs + BIOVEC_MAX_IDX;

-	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
-		struct biovec_slab *bp = bvec_slabs + i;
-		mempool_t **bvp = bs->bvec_pools + i;
+	bs->bvec_pool = mempool_create_slab_pool(pool_entries, bp->slab);
+	if (!bs->bvec_pool)
+		return -ENOMEM;

-		*bvp = mempool_create_slab_pool(pool_entries, bp->slab);
-		if (!*bvp)
-			return -ENOMEM;
-	}
 	return 0;
 }

 static void biovec_free_pools(struct bio_set *bs)
 {
-	int i;
-
-	for (i = 0; i < BIOVEC_NR_POOLS; i++) {
-		mempool_t *bvp = bs->bvec_pools[i];
-
-		if (bvp)
-			mempool_destroy(bvp);
-	}
-
+	mempool_destroy(bs->bvec_pool);
 }

 void bioset_free(struct bio_set *bs)
@@ -1379,25 +1517,49 @@ void bioset_free(struct bio_set *bs)

 	bioset_integrity_free(bs);
 	biovec_free_pools(bs);
+	bio_put_slab(bs);

 	kfree(bs);
 }

-struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size)
+/**
+ * bioset_create  - Create a bio_set
+ * @pool_size:	Number of bio and bio_vecs to cache in the mempool
+ * @front_pad:	Number of bytes to allocate in front of the returned bio
+ *
+ * Description:
+ *    Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
+ *    to ask for a number of bytes to be allocated in front of the bio.
+ *    Front pad allocation is useful for embedding the bio inside
+ *    another structure, to avoid allocating extra data to go with the bio.
+ *    Note that the bio must be embedded at the END of that structure always,
+ *    or things will break badly.
+ */
+struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
 {
-	struct bio_set *bs = kzalloc(sizeof(*bs), GFP_KERNEL);
+	unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
+	struct bio_set *bs;

+	bs = kzalloc(sizeof(*bs), GFP_KERNEL);
 	if (!bs)
 		return NULL;

-	bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab);
+	bs->front_pad = front_pad;
+
+	bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
+	if (!bs->bio_slab) {
+		kfree(bs);
+		return NULL;
+	}
+
+	bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
 	if (!bs->bio_pool)
 		goto bad;

-	if (bioset_integrity_create(bs, bio_pool_size))
+	if (bioset_integrity_create(bs, pool_size))
 		goto bad;

-	if (!biovec_create_pools(bs, bvec_pool_size))
+	if (!biovec_create_pools(bs, pool_size))
 		return bs;

 bad:
@@ -1421,12 +1583,16 @@ static void __init biovec_init_slabs(void)

 static int __init init_bio(void)
 {
-	bio_slab = KMEM_CACHE(bio, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
+	bio_slab_max = 2;
+	bio_slab_nr = 0;
+	bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
+	if (!bio_slabs)
+		panic("bio: can't allocate bios\n");

 	bio_integrity_init_slab();
 	biovec_init_slabs();

-	fs_bio_set = bioset_create(BIO_POOL_SIZE, 2);
+	fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
 	if (!fs_bio_set)
 		panic("bio: can't allocate bios\n");

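As the bioset_create() kernel-doc above describes, the new front_pad argument lets a user of the bio_set embed the bio at the end of its own per-request structure, so driver data and bio come from a single allocation. A rough sketch of that usage with hypothetical names and sizes (offsetof from <linux/stddef.h>; the caller still owns ->bi_destructor, per the bio_alloc_bioset() comment above):

	struct my_io {
		void		*private;	/* driver data lives in the front pad */
		struct bio	bio;		/* must be the last member */
	};

	static struct bio_set *my_bio_set;

	static int __init my_init(void)
	{
		my_bio_set = bioset_create(16, offsetof(struct my_io, bio));
		if (!my_bio_set)
			return -ENOMEM;
		return 0;
	}

	static struct my_io *my_io_alloc(gfp_t gfp, int nr_vecs)
	{
		struct bio *bio = bio_alloc_bioset(gfp, nr_vecs, my_bio_set);

		if (!bio)
			return NULL;
		return container_of(bio, struct my_io, bio);
	}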
19	fs/buffer.c
@@ -99,10 +99,18 @@ __clear_page_buffers(struct page *page)
 	page_cache_release(page);
 }

+static int quiet_error(struct buffer_head *bh)
+{
+	if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
+		return 0;
+	return 1;
+}
+
+
 static void buffer_io_error(struct buffer_head *bh)
 {
 	char b[BDEVNAME_SIZE];

 	printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n",
 			bdevname(bh->b_bdev, b),
 			(unsigned long long)bh->b_blocknr);
@@ -144,7 +152,7 @@ void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
-		if (!buffer_eopnotsupp(bh) && printk_ratelimit()) {
+		if (!buffer_eopnotsupp(bh) && !quiet_error(bh)) {
 			buffer_io_error(bh);
 			printk(KERN_WARNING "lost page write due to "
 					"I/O error on %s\n",
@@ -394,7 +402,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		set_buffer_uptodate(bh);
 	} else {
 		clear_buffer_uptodate(bh);
-		if (printk_ratelimit())
+		if (!quiet_error(bh))
 			buffer_io_error(bh);
 		SetPageError(page);
 	}
@@ -455,7 +463,7 @@ static void end_buffer_async_write(struct buffer_head *bh, int uptodate)
 	if (uptodate) {
 		set_buffer_uptodate(bh);
 	} else {
-		if (printk_ratelimit()) {
+		if (!quiet_error(bh)) {
 			buffer_io_error(bh);
 			printk(KERN_WARNING "lost page write due to "
 					"I/O error on %s\n",
@@ -2913,6 +2921,9 @@ static void end_bio_bh_io_sync(struct bio *bio, int err)
 		set_bit(BH_Eopnotsupp, &bh->b_state);
 	}

+	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
+		set_bit(BH_Quiet, &bh->b_state);
+
 	bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
 	bio_put(bio);
 }
@@ -1721,7 +1721,7 @@ static loff_t ext4_max_size(int blkbits, int has_huge_files)
 	/* small i_blocks in vfs inode? */
 	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
 		/*
-		 * CONFIG_LSF is not enabled implies the inode
+		 * CONFIG_LBD is not enabled implies the inode
 		 * i_block represent total blocks in 512 bytes
 		 * 32 == size of vfs inode i_blocks * 8
 		 */
@@ -1764,7 +1764,7 @@ static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)

 	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
 		/*
-		 * !has_huge_files or CONFIG_LSF is not enabled
+		 * !has_huge_files or CONFIG_LBD is not enabled
 		 * implies the inode i_block represent total blocks in
 		 * 512 bytes 32 == size of vfs inode i_blocks * 8
 		 */
@@ -2021,13 +2021,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	if (has_huge_files) {
 		/*
 		 * Large file size enabled file system can only be
-		 * mount if kernel is build with CONFIG_LSF
+		 * mount if kernel is build with CONFIG_LBD
 		 */
 		if (sizeof(root->i_blocks) < sizeof(u64) &&
 				!(sb->s_flags & MS_RDONLY)) {
 			printk(KERN_ERR "EXT4-fs: %s: Filesystem with huge "
 					"files cannot be mounted read-write "
-					"without CONFIG_LSF.\n", sb->s_id);
+					"without CONFIG_LBD.\n", sb->s_id);
 			goto failed_mount;
 		}
 	}
@@ -5,6 +5,7 @@
 #include <linux/workqueue.h>
 #include <linux/aio_abi.h>
 #include <linux/uio.h>
+#include <linux/rcupdate.h>

 #include <asm/atomic.h>

@@ -183,7 +184,7 @@ struct kioctx {

 	/* This needs improving */
 	unsigned long		user_id;
-	struct kioctx		*next;
+	struct hlist_node	list;

 	wait_queue_head_t	wait;

@@ -199,6 +200,8 @@ struct kioctx {
 	struct aio_ring_info	ring_info;

 	struct delayed_work	wq;
+
+	struct rcu_head		rcu_head;
 };

 /* prototypes */
@@ -90,10 +90,11 @@ struct bio {

 	unsigned int		bi_comp_cpu;	/* completion CPU */

+	atomic_t		bi_cnt;		/* pin count */
+
 	struct bio_vec		*bi_io_vec;	/* the actual vec list */

 	bio_end_io_t		*bi_end_io;
-	atomic_t		bi_cnt;		/* pin count */

 	void			*bi_private;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -101,6 +102,13 @@ struct bio {
 #endif

 	bio_destructor_t	*bi_destructor;	/* destructor */
+
+	/*
+	 * We can inline a number of vecs at the end of the bio, to avoid
+	 * double allocations for a small number of bio_vecs. This member
+	 * MUST obviously be kept at the very end of the bio.
+	 */
+	struct bio_vec		bi_inline_vecs[0];
 };

 /*
@@ -117,6 +125,7 @@ struct bio {
 #define BIO_CPU_AFFINE	8	/* complete bio on same CPU as submitted */
 #define BIO_NULL_MAPPED 9	/* contains invalid user pages */
 #define BIO_FS_INTEGRITY 10	/* fs owns integrity data, not block layer */
+#define BIO_QUIET	11	/* Make BIO Quiet */
 #define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))

 /*
@@ -211,6 +220,11 @@ static inline void *bio_data(struct bio *bio)
 	return NULL;
 }

+static inline int bio_has_allocated_vec(struct bio *bio)
+{
+	return bio->bi_io_vec && bio->bi_io_vec != bio->bi_inline_vecs;
+}
+
 /*
  * will die
  */
@@ -332,7 +346,7 @@ struct bio_pair {
 extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
 extern void bio_pair_release(struct bio_pair *dbio);

-extern struct bio_set *bioset_create(int, int);
+extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);

 extern struct bio *bio_alloc(gfp_t, int);
@@ -377,6 +391,7 @@ extern struct bio *bio_copy_user_iov(struct request_queue *,
 extern int bio_uncopy_user(struct bio *);
 void zero_fill_bio(struct bio *bio);
 extern struct bio_vec *bvec_alloc_bs(gfp_t, int, unsigned long *, struct bio_set *);
+extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int);
 extern unsigned int bvec_nr_vecs(unsigned short idx);

 /*
@@ -395,13 +410,17 @@ static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu)
  */
 #define BIO_POOL_SIZE 2
 #define BIOVEC_NR_POOLS 6
+#define BIOVEC_MAX_IDX	(BIOVEC_NR_POOLS - 1)

 struct bio_set {
+	struct kmem_cache	*bio_slab;
+	unsigned int		front_pad;
+
 	mempool_t *bio_pool;
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 	mempool_t *bio_integrity_pool;
 #endif
-	mempool_t *bvec_pools[BIOVEC_NR_POOLS];
+	mempool_t *bvec_pool;
 };

 struct biovec_slab {
@@ -411,6 +430,7 @@ struct biovec_slab {
 };

 extern struct bio_set *fs_bio_set;
+extern struct biovec_slab bvec_slabs[BIOVEC_NR_POOLS] __read_mostly;

 /*
  * a small number of entries is fine, not going to be performance critical.
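The bi_inline_vecs[0] member added above is the usual trailing zero-length array idiom: the flexible tail shares the allocation of the enclosing object, so a small vector needs no second allocation. A small sketch of the idiom itself, with hypothetical names rather than the bio code:

	struct vec_holder {
		unsigned short nr;
		struct bio_vec vecs[0];		/* must stay the last member */
	};

	static struct vec_holder *vec_holder_alloc(unsigned short nr, gfp_t gfp)
	{
		struct vec_holder *h;

		/* one allocation covers the struct plus nr trailing elements */
		h = kzalloc(sizeof(*h) + nr * sizeof(struct bio_vec), gfp);
		if (h)
			h->nr = nr;
		return h;
	}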
@@ -26,7 +26,6 @@ struct scsi_ioctl_command;

 struct request_queue;
 struct elevator_queue;
-typedef struct elevator_queue elevator_t;
 struct request_pm_state;
 struct blk_trace;
 struct request;
@@ -313,7 +312,7 @@ struct request_queue
 	 */
 	struct list_head	queue_head;
 	struct request		*last_merge;
-	elevator_t		*elevator;
+	struct elevator_queue	*elevator;

 	/*
 	 * the queue request freelist, one for reads and one for writes
@@ -449,6 +448,7 @@ struct request_queue
 #define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
 #define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
 #define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */

 static inline int queue_is_locked(struct request_queue *q)
 {
@@ -522,22 +522,32 @@ enum {
 	 * TAG_FLUSH	: ordering by tag w/ pre and post flushes
 	 * TAG_FUA	: ordering by tag w/ pre flush and FUA write
 	 */
-	QUEUE_ORDERED_NONE	= 0x00,
-	QUEUE_ORDERED_DRAIN	= 0x01,
-	QUEUE_ORDERED_TAG	= 0x02,
+	QUEUE_ORDERED_BY_DRAIN		= 0x01,
+	QUEUE_ORDERED_BY_TAG		= 0x02,
+	QUEUE_ORDERED_DO_PREFLUSH	= 0x10,
+	QUEUE_ORDERED_DO_BAR		= 0x20,
+	QUEUE_ORDERED_DO_POSTFLUSH	= 0x40,
+	QUEUE_ORDERED_DO_FUA		= 0x80,

-	QUEUE_ORDERED_PREFLUSH	= 0x10,
-	QUEUE_ORDERED_POSTFLUSH	= 0x20,
-	QUEUE_ORDERED_FUA	= 0x40,
+	QUEUE_ORDERED_NONE		= 0x00,

-	QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_DRAIN_FUA	= QUEUE_ORDERED_DRAIN |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
-	QUEUE_ORDERED_TAG_FLUSH	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH,
-	QUEUE_ORDERED_TAG_FUA	= QUEUE_ORDERED_TAG |
-			QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_FUA,
+	QUEUE_ORDERED_DRAIN		= QUEUE_ORDERED_BY_DRAIN |
+					  QUEUE_ORDERED_DO_BAR,
+	QUEUE_ORDERED_DRAIN_FLUSH	= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_DRAIN_FUA		= QUEUE_ORDERED_DRAIN |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,
+
+	QUEUE_ORDERED_TAG		= QUEUE_ORDERED_BY_TAG |
+					  QUEUE_ORDERED_DO_BAR,
+	QUEUE_ORDERED_TAG_FLUSH		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_POSTFLUSH,
+	QUEUE_ORDERED_TAG_FUA		= QUEUE_ORDERED_TAG |
+					  QUEUE_ORDERED_DO_PREFLUSH |
+					  QUEUE_ORDERED_DO_FUA,

 	/*
 	 * Ordered operation sequence
@@ -585,7 +595,6 @@ enum {
 #define blk_fua_rq(rq)		((rq)->cmd_flags & REQ_FUA)
 #define blk_discard_rq(rq)	((rq)->cmd_flags & REQ_DISCARD)
 #define blk_bidi_rq(rq)		((rq)->next_rq != NULL)
-#define blk_empty_barrier(rq)	(blk_barrier_rq(rq) && blk_fs_request(rq) && !(rq)->hard_nr_sectors)
 /* rq->queuelist of dequeued request must be list_empty() */
 #define blk_queued_rq(rq)	(!list_empty(&(rq)->queuelist))

@@ -855,10 +864,10 @@ extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *);
-extern int blk_do_ordered(struct request_queue *, struct request **);
+extern bool blk_do_ordered(struct request_queue *, struct request **);
 extern unsigned blk_ordered_cur_seq(struct request_queue *);
 extern unsigned blk_ordered_req_seq(struct request *);
-extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
+extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);

 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
@@ -977,7 +986,6 @@ static inline void put_dev_sector(Sector p)

 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-void kblockd_flush_work(struct work_struct *work);

 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
 	MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
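A driver picks one of the composite QUEUE_ORDERED_* modes above through blk_queue_ordered(), whose prototype is also visible in this hunk. A hedged sketch only, with a hypothetical prepare-flush callback and setup function:

	static void my_prepare_flush(struct request_queue *q, struct request *rq)
	{
		/* fill @rq with the device-specific cache flush command */
	}

	static void my_setup_queue(struct request_queue *q)
	{
		/* write-back cache: drain the queue, flush before and after the barrier */
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, my_prepare_flush);
	}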
@@ -35,6 +35,7 @@ enum bh_state_bits {
 	BH_Ordered,	/* ordered write */
 	BH_Eopnotsupp,	/* operation not supported (barrier) */
 	BH_Unwritten,	/* Buffer is allocated on disk but not written */
+	BH_Quiet,	/* Buffer Error Prinks to be quiet */

 	BH_PrivateStart,/* not a state bit, but the first bit available
 			 * for private allocation by other entities
@@ -28,7 +28,7 @@ typedef void (elevator_activate_req_fn) (struct request_queue *, struct request
 typedef void (elevator_deactivate_req_fn) (struct request_queue *, struct request *);

 typedef void *(elevator_init_fn) (struct request_queue *);
-typedef void (elevator_exit_fn) (elevator_t *);
+typedef void (elevator_exit_fn) (struct elevator_queue *);

 struct elevator_ops
 {
@@ -62,8 +62,8 @@ struct elevator_ops

 struct elv_fs_entry {
 	struct attribute attr;
-	ssize_t (*show)(elevator_t *, char *);
-	ssize_t (*store)(elevator_t *, const char *, size_t);
+	ssize_t (*show)(struct elevator_queue *, char *);
+	ssize_t (*store)(struct elevator_queue *, const char *, size_t);
 };

 /*
@@ -130,7 +130,7 @@ extern ssize_t elv_iosched_show(struct request_queue *, char *);
 extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);

 extern int elevator_init(struct request_queue *, char *);
-extern void elevator_exit(elevator_t *);
+extern void elevator_exit(struct elevator_queue *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);

 /*
@@ -126,6 +126,7 @@ struct blk_scsi_cmd_filter {
 struct disk_part_tbl {
 	struct rcu_head rcu_head;
 	int len;
+	struct hd_struct *last_lookup;
 	struct hd_struct *part[];
 };

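The new last_lookup member is a one-hit cache: the most recently matched partition is tried before the table is scanned again, which pays off when consecutive sectors land in the same partition. A sketch of the idea only, assuming hd_struct's start_sect/nr_sects fields and a hypothetical helper name:

	static struct hd_struct *part_find(struct disk_part_tbl *tbl, sector_t sector)
	{
		struct hd_struct *part = tbl->last_lookup;
		int i;

		/* fast path: does the cached entry still cover this sector? */
		if (part && part->start_sect <= sector &&
		    sector < part->start_sect + part->nr_sects)
			return part;

		for (i = 1; i < tbl->len; i++) {
			part = tbl->part[i];
			if (part && part->start_sect <= sector &&
			    sector < part->start_sect + part->nr_sects) {
				tbl->last_lookup = part;
				return part;
			}
		}
		return NULL;
	}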
@@ -232,8 +232,9 @@ struct mm_struct {
 	struct core_state *core_state; /* coredumping support */

 	/* aio bits */
-	rwlock_t		ioctx_list_lock;	/* aio lock */
-	struct kioctx		*ioctx_list;
+	spinlock_t		ioctx_lock;
+	struct hlist_head	ioctx_list;

 #ifdef CONFIG_MM_OWNER
 	/*
 	 * "owner" points to a task that is regarded as the canonical
@@ -135,19 +135,14 @@ typedef __s64 int64_t;
  *
  * Linux always considers sectors to be 512 bytes long independently
  * of the devices real block size.
+ *
+ * blkcnt_t is the type of the inode's block count.
  */
 #ifdef CONFIG_LBD
 typedef u64 sector_t;
-#else
-typedef unsigned long sector_t;
-#endif
-
-/*
- * The type of the inode's block count.
- */
-#ifdef CONFIG_LSF
 typedef u64 blkcnt_t;
 #else
+typedef unsigned long sector_t;
 typedef unsigned long blkcnt_t;
 #endif

@@ -1037,8 +1037,6 @@ NORET_TYPE void do_exit(long code)
 	 * task into the wait for ever nirwana as well.
 	 */
 	tsk->flags |= PF_EXITPIDONE;
-	if (tsk->io_context)
-		exit_io_context();
 	set_current_state(TASK_UNINTERRUPTIBLE);
 	schedule();
 }
@@ -415,8 +415,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 	set_mm_counter(mm, file_rss, 0);
 	set_mm_counter(mm, anon_rss, 0);
 	spin_lock_init(&mm->page_table_lock);
-	rwlock_init(&mm->ioctx_list_lock);
-	mm->ioctx_list = NULL;
+	spin_lock_init(&mm->ioctx_lock);
+	INIT_HLIST_HEAD(&mm->ioctx_list);
 	mm->free_area_cache = TASK_UNMAPPED_BASE;
 	mm->cached_hole_size = ~0UL;
 	mm_init_owner(mm, p);
@@ -198,8 +198,13 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 		/*
 		 * irk, bounce it
 		 */
-		if (!bio)
-			bio = bio_alloc(GFP_NOIO, (*bio_orig)->bi_vcnt);
+		if (!bio) {
+			unsigned int cnt = (*bio_orig)->bi_vcnt;
+
+			bio = bio_alloc(GFP_NOIO, cnt);
+			memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
+		}

 		to = bio->bi_io_vec + i;
