[PATCH] Remove ->waiting member from struct request
As the comment in blkdev.h indicates, we can fold ->waiting into the ->end_io_data usage, as that is really what ->waiting is. Fix up the users of blk_end_sync_rq().

Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 8a8e674cb1
commit c00895ab2f
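For context, a minimal sketch (not part of this patch) of the synchronous-submission pattern the converted blk_end_sync_rq() callers follow after this change: the completion is stashed in ->end_io_data rather than the removed ->waiting member. The helper name sync_rq_sketch() is hypothetical; it mirrors the blk_execute_rq() hunk below.

#include <linux/blkdev.h>
#include <linux/completion.h>

static int sync_rq_sketch(request_queue_t *q, struct gendisk *bd_disk,
			  struct request *rq, int at_head)
{
	DECLARE_COMPLETION(wait);	/* on-stack completion to wait on */
	int err = 0;

	/* hold an extra reference so rq can be inspected after completion */
	rq->ref_count++;

	rq->end_io_data = &wait;	/* was: rq->waiting = &wait; */
	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
	wait_for_completion(&wait);
	/* blk_end_sync_rq() completed and cleared ->end_io_data; no
	 * ->waiting teardown is needed. The caller still owns its original
	 * reference and drops it with blk_put_request() when done. */

	if (rq->errors)
		err = -EIO;
	return err;
}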
@@ -67,8 +67,7 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 	/*
 	 * same device and no special stuff set, merge is ok
 	 */
-	if (rq->rq_disk == bio->bi_bdev->bd_disk &&
-	    !rq->waiting && !rq->special)
+	if (rq->rq_disk == bio->bi_bdev->bd_disk && !rq->special)
 		return 1;
 
 	return 0;
@@ -291,7 +291,6 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
 	rq->buffer = NULL;
 	rq->ref_count = 1;
 	rq->q = q;
-	rq->waiting = NULL;
 	rq->special = NULL;
 	rq->data_len = 0;
 	rq->data = NULL;
@@ -451,6 +450,7 @@ static void queue_flush(request_queue_t *q, unsigned which)
 	rq->cmd_flags = REQ_HARDBARRIER;
 	rq_init(q, rq);
 	rq->elevator_private = NULL;
+	rq->elevator_private2 = NULL;
 	rq->rq_disk = q->bar_rq.rq_disk;
 	rq->rl = NULL;
 	rq->end_io = end_io;
@@ -479,6 +479,7 @@ static inline struct request *start_ordered(request_queue_t *q,
 	rq->cmd_flags |= REQ_RW;
 	rq->cmd_flags |= q->ordered & QUEUE_ORDERED_FUA ? REQ_FUA : 0;
 	rq->elevator_private = NULL;
+	rq->elevator_private2 = NULL;
 	rq->rl = NULL;
 	init_request_from_bio(rq, q->orig_bar_rq->bio);
 	rq->end_io = bar_end_io;
@@ -2569,10 +2570,9 @@ int blk_execute_rq(request_queue_t *q, struct gendisk *bd_disk,
 		rq->sense_len = 0;
 	}
 
-	rq->waiting = &wait;
+	rq->end_io_data = &wait;
 	blk_execute_rq_nowait(q, bd_disk, rq, at_head, blk_end_sync_rq);
 	wait_for_completion(&wait);
-	rq->waiting = NULL;
 
 	if (rq->errors)
 		err = -EIO;
@@ -2736,9 +2736,9 @@ EXPORT_SYMBOL(blk_put_request);
  */
 void blk_end_sync_rq(struct request *rq, int error)
 {
-	struct completion *waiting = rq->waiting;
+	struct completion *waiting = rq->end_io_data;
 
-	rq->waiting = NULL;
+	rq->end_io_data = NULL;
 	__blk_put_request(rq->q, rq);
 
 	/*
@@ -2801,7 +2801,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
 
 	if (rq_data_dir(req) != rq_data_dir(next)
 	    || req->rq_disk != next->rq_disk
-	    || next->waiting || next->special)
+	    || next->special)
 		return 0;
 
 	/*
@@ -2886,7 +2886,6 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
 	req->nr_phys_segments = bio_phys_segments(req->q, bio);
 	req->nr_hw_segments = bio_hw_segments(req->q, bio);
 	req->buffer = bio_data(bio);	/* see ->buffer comment above */
-	req->waiting = NULL;
 	req->bio = req->biotail = bio;
 	req->ioprio = bio_prio(bio);
 	req->rq_disk = bio->bi_bdev->bd_disk;
@@ -3331,7 +3331,7 @@ static int DAC960_process_queue(DAC960_Controller_T *Controller, struct request_
 		Command->DmaDirection = PCI_DMA_TODEVICE;
 		Command->CommandType = DAC960_WriteCommand;
 	}
-	Command->Completion = Request->waiting;
+	Command->Completion = Request->end_io_data;
 	Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
 	Command->BlockNumber = Request->sector;
 	Command->BlockCount = Request->nr_sectors;
@@ -722,11 +722,10 @@ static int pd_special_command(struct pd_unit *disk,
 	rq.rq_status = RQ_ACTIVE;
 	rq.rq_disk = disk->gd;
 	rq.ref_count = 1;
-	rq.waiting = &wait;
+	rq.end_io_data = &wait;
 	rq.end_io = blk_end_sync_rq;
 	blk_insert_request(disk->gd->queue, &rq, 0, func);
 	wait_for_completion(&wait);
-	rq.waiting = NULL;
 	if (rq.errors)
 		err = -EIO;
 	blk_put_request(&rq);
@@ -375,7 +375,7 @@ static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *
 	rq->cmd_len = COMMAND_SIZE(rq->cmd[0]);
 
 	rq->ref_count++;
-	rq->waiting = &wait;
+	rq->end_io_data = &wait;
 	rq->end_io = blk_end_sync_rq;
 	elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1);
 	generic_unplug_device(q);
@@ -141,7 +141,7 @@ enum {
 
 static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 stat, u8 error)
 {
-	struct request_pm_state *pm = rq->end_io_data;
+	struct request_pm_state *pm = rq->data;
 
 	if (drive->media != ide_disk)
 		return;
@@ -164,7 +164,7 @@ static void ide_complete_power_step(ide_drive_t *drive, struct request *rq, u8 s
 
 static ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
 {
-	struct request_pm_state *pm = rq->end_io_data;
+	struct request_pm_state *pm = rq->data;
 	ide_task_t *args = rq->special;
 
 	memset(args, 0, sizeof(*args));
@@ -421,7 +421,7 @@ void ide_end_drive_cmd (ide_drive_t *drive, u8 stat, u8 err)
 			}
 		}
 	} else if (blk_pm_request(rq)) {
-		struct request_pm_state *pm = rq->end_io_data;
+		struct request_pm_state *pm = rq->data;
 #ifdef DEBUG_PM
 		printk("%s: complete_power_step(step: %d, stat: %x, err: %x)\n",
 			drive->name, rq->pm->pm_step, stat, err);
@@ -933,7 +933,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
 
 static void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
 {
-	struct request_pm_state *pm = rq->end_io_data;
+	struct request_pm_state *pm = rq->data;
 
 	if (blk_pm_suspend_request(rq) &&
 	    pm->pm_step == ide_pm_state_start_suspend)
@@ -1018,7 +1018,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
 	    rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
 		return execute_drive_cmd(drive, rq);
 	else if (blk_pm_request(rq)) {
-		struct request_pm_state *pm = rq->end_io_data;
+		struct request_pm_state *pm = rq->data;
 #ifdef DEBUG_PM
 		printk("%s: start_power_step(step: %d)\n",
 			drive->name, rq->pm->pm_step);
@@ -1718,7 +1718,7 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
 	 */
 	if (must_wait) {
 		rq->ref_count++;
-		rq->waiting = &wait;
+		rq->end_io_data = &wait;
 		rq->end_io = blk_end_sync_rq;
 	}
 
@@ -1736,7 +1736,6 @@ int ide_do_drive_cmd (ide_drive_t *drive, struct request *rq, ide_action_t actio
 	err = 0;
 	if (must_wait) {
 		wait_for_completion(&wait);
-		rq->waiting = NULL;
 		if (rq->errors)
 			err = -EIO;
 
@@ -2773,7 +2773,7 @@ static void idetape_wait_for_request (ide_drive_t *drive, struct request *rq)
 		return;
 	}
 #endif /* IDETAPE_DEBUG_BUGS */
-	rq->waiting = &wait;
+	rq->end_io_data = &wait;
 	rq->end_io = blk_end_sync_rq;
 	spin_unlock_irq(&tape->spinlock);
 	wait_for_completion(&wait);
@@ -1219,7 +1219,7 @@ static int generic_ide_suspend(struct device *dev, pm_message_t mesg)
 	memset(&args, 0, sizeof(args));
 	rq.cmd_type = REQ_TYPE_PM_SUSPEND;
 	rq.special = &args;
-	rq.end_io_data = &rqpm;
+	rq.data = &rqpm;
 	rqpm.pm_step = ide_pm_state_start_suspend;
 	if (mesg.event == PM_EVENT_PRETHAW)
 		mesg.event = PM_EVENT_FREEZE;
@@ -1240,7 +1240,7 @@ static int generic_ide_resume(struct device *dev)
 	memset(&args, 0, sizeof(args));
 	rq.cmd_type = REQ_TYPE_PM_RESUME;
 	rq.special = &args;
-	rq.end_io_data = &rqpm;
+	rq.data = &rqpm;
 	rqpm.pm_step = ide_pm_state_start_resume;
 	rqpm.pm_state = PM_EVENT_ON;
 
@@ -266,7 +266,6 @@ struct request {
 	request_queue_t *q;
 	struct request_list *rl;
 
-	struct completion *waiting;
 	void *special;
 	char *buffer;
 
@@ -285,7 +284,7 @@ struct request {
 	int retries;
 
 	/*
-	 * completion callback. end_io_data should be folded in with waiting
+	 * completion callback.
 	 */
 	rq_end_io_fn *end_io;
 	void *end_io_data;