scsi: add new scsi-command flag for tagged commands
Currently SCSI piggybacks on the block layer to define the concept of a tagged command, but we want to be able to have block-level host-wide tags assigned even to untagged commands such as the initial INQUIRY. Add a new SCSI-level flag for commands that are tagged at the SCSI level, so that even commands without it set can still have a block-layer tag assigned to them.

Note that this is already the case for the blk-mq code path; this change just lets the old path catch up with it.

We also set the flag based on sdev->simple_tags instead of the block queue flag, so that it is entirely independent of block-layer tagging and therefore always correct, even for drivers that do not use block-level tagging yet.

Also remove the old blk_rq_tagged(); it was only used by SCSI drivers, and removing it forces them to look for the proper replacement.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Mike Christie <michaelc@cs.wisc.edu>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
parent a62182f338
commit 125c99bc8b
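For orientation before the diff: the driver-visible part of the conversion is that a low-level driver which used to ask the block layer whether a command was tagged now checks the new SCSI-level flag. A minimal sketch of that check (the helper name is hypothetical, not part of this commit):

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>

static bool example_cmd_is_tagged(struct scsi_cmnd *cmd)
{
	/*
	 * Old way: blk_rq_tagged(cmd->request), i.e. the block-layer
	 * REQ_QUEUED flag, which is only set when block-level tagging
	 * is in use.  New way: the SCSI-level flag, which the midlayer
	 * sets from sdev->simple_tags.
	 */
	return cmd->flags & SCMD_TAGGED;
}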
@@ -827,10 +827,6 @@ but in the event of any barrier requests in the tag queue we need to ensure
 that requests are restarted in the order they were queue. This may happen
 if the driver needs to use blk_queue_invalidate_tags().
 
-Tagging also defines a new request flag, REQ_QUEUED. This is set whenever
-a request is currently tagged. You should not use this flag directly,
-blk_rq_tagged(rq) is the portable way to do so.
-
 3.3 I/O Submission
 
 The routine submit_bio() is used to submit a single io. Higher level i/o

@@ -1266,7 +1266,7 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 	blk_clear_rq_complete(rq);
 	trace_block_rq_requeue(q, rq);
 
-	if (blk_rq_tagged(rq))
+	if (rq->cmd_flags & REQ_QUEUED)
 		blk_queue_end_tag(q, rq);
 
 	BUG_ON(blk_queued_rq(rq));

@@ -2554,7 +2554,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
  */
 void blk_finish_request(struct request *req, int error)
 {
-	if (blk_rq_tagged(req))
+	if (req->cmd_flags & REQ_QUEUED)
 		blk_queue_end_tag(req->q, req);
 
 	BUG_ON(blk_queued_rq(req));

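For context: as the hunk removing the blk_rq_tagged() definition further down shows, that helper was nothing more than a test of REQ_QUEUED, so open-coding the test in the two block-core call sites above preserves behaviour exactly. A minimal sketch of the equivalence (helper name hypothetical):

#include <linux/blkdev.h>

static inline bool example_rq_is_tagged(struct request *rq)
{
	/* Exactly what the removed blk_rq_tagged(rq) macro expanded to. */
	return rq->cmd_flags & REQ_QUEUED;
}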
@@ -1767,7 +1767,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
 	 */
 	if(NCR_700_get_depth(SCp->device) != 0
 	   && (!(hostdata->tag_negotiated & (1<<scmd_id(SCp)))
-	       || !blk_rq_tagged(SCp->request))) {
+	       || !(SCp->flags & SCMD_TAGGED))) {
 		CDEBUG(KERN_ERR, SCp, "has non zero depth %d\n",
 		       NCR_700_get_depth(SCp->device));
 		return SCSI_MLQUEUE_DEVICE_BUSY;

@@ -1795,7 +1795,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
 	printk("53c700: scsi%d, command ", SCp->device->host->host_no);
 	scsi_print_command(SCp);
 #endif
-	if(blk_rq_tagged(SCp->request)
+	if ((SCp->flags & SCMD_TAGGED)
 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp))) == 0
 	   && NCR_700_get_tag_neg_state(SCp->device) == NCR_700_START_TAG_NEGOTIATION) {
 		scmd_printk(KERN_ERR, SCp, "Enabling Tag Command Queuing\n");

@@ -1809,7 +1809,7 @@ NCR_700_queuecommand_lck(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *)
 	 *
 	 * FIXME: This will royally screw up on multiple LUN devices
 	 * */
-	if(!blk_rq_tagged(SCp->request)
+	if (!(SCp->flags & SCMD_TAGGED)
 	   && (hostdata->tag_negotiated &(1<<scmd_id(SCp)))) {
 		scmd_printk(KERN_INFO, SCp, "Disabling Tag Command Queuing\n");
 		hostdata->tag_negotiated &= ~(1<<scmd_id(SCp));

@@ -1447,7 +1447,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
 	 * we are storing a full busy target *lun*
 	 * table in SCB space.
 	 */
-	if (!blk_rq_tagged(cmd->request)
+	if (!(cmd->flags & SCMD_TAGGED)
 	    && (ahc->features & AHC_SCB_BTT) == 0) {
 		int target_offset;
 

@@ -1740,7 +1740,7 @@ static void scsi_request_fn(struct request_queue *q)
 		 * we add the dev to the starved list so it eventually gets
 		 * a run when a tag is freed.
 		 */
-		if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
+		if (blk_queue_tagged(q) && !(req->cmd_flags & REQ_QUEUED)) {
 			spin_lock_irq(shost->host_lock);
 			if (list_empty(&sdev->starved_entry))
 				list_add_tail(&sdev->starved_entry,

@@ -1755,6 +1755,11 @@ static void scsi_request_fn(struct request_queue *q)
 		if (!scsi_host_queue_ready(q, shost, sdev))
 			goto host_not_ready;
 
+		if (sdev->simple_tags)
+			cmd->flags |= SCMD_TAGGED;
+		else
+			cmd->flags &= ~SCMD_TAGGED;
+
 		/*
 		 * Finally, initialize any error handling parameters, and set up
 		 * the timers for timeouts.

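The lines added above implement the rule from the changelog: SCMD_TAGGED is derived solely from sdev->simple_tags, independent of whether the request queue has block-level tagging enabled. A standalone sketch of that assignment (hypothetical helper, not part of the commit):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

static void example_set_cmd_tagged(struct scsi_cmnd *cmd)
{
	/* Track the device setting, not the block-layer queue flag. */
	if (cmd->device->simple_tags)
		cmd->flags |= SCMD_TAGGED;
	else
		cmd->flags &= ~SCMD_TAGGED;
}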
@@ -1908,10 +1913,10 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
 		blk_mq_start_request(req);
 	}
 
-	if (blk_queue_tagged(q))
-		req->cmd_flags |= REQ_QUEUED;
+	if (sdev->simple_tags)
+		cmd->flags |= SCMD_TAGGED;
 	else
-		req->cmd_flags &= ~REQ_QUEUED;
+		cmd->flags &= ~SCMD_TAGGED;
 
 	scsi_init_cmd_errh(cmd);
 	cmd->scsi_done = scsi_mq_done;

@@ -181,7 +181,7 @@ static int uas_get_tag(struct scsi_cmnd *cmnd)
 {
 	int tag;
 
-	if (blk_rq_tagged(cmnd->request))
+	if (cmnd->flags & SCMD_TAGGED)
 		tag = cmnd->request->tag + 2;
 	else
 		tag = 1;

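In the uas hunk above, tag 1 stays reserved for untagged commands, while tagged commands map the zero-based block-layer tag to 2 and up, so the two ranges cannot collide. A standalone restatement of that mapping (hypothetical function name, not from the commit):

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>

static int example_uas_tag(struct scsi_cmnd *cmnd)
{
	if (cmnd->flags & SCMD_TAGGED)
		return cmnd->request->tag + 2;	/* 0, 1, 2, ... -> 2, 3, 4, ... */
	return 1;				/* fixed slot for untagged commands */
}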
@@ -1136,7 +1136,6 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 /*
  * tag stuff
  */
-#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
 extern int blk_queue_start_tag(struct request_queue *, struct request *);
 extern struct request *blk_queue_find_tag(struct request_queue *, int);
 extern void blk_queue_end_tag(struct request_queue *, struct request *);

@@ -53,6 +53,9 @@ struct scsi_pointer {
 	volatile int phase;
 };
 
+/* for scmd->flags */
+#define SCMD_TAGGED		(1 << 0)
+
 struct scsi_cmnd {
 	struct scsi_device *device;
 	struct list_head list;	/* scsi_cmnd participates in queue lists */

@@ -132,6 +135,7 @@ struct scsi_cmnd {
 	 * to be at an address < 16Mb). */
 
 	int result;		/* Status code from lower level driver */
+	int flags;		/* Command flags */
 
 	unsigned char tag;	/* SCSI-II queued command tag */
 };

@@ -101,11 +101,9 @@ static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
  **/
 static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg)
 {
-	struct request *req = cmd->request;
-
-	if (blk_rq_tagged(req)) {
+	if (cmd->flags & SCMD_TAGGED) {
 		*msg++ = MSG_SIMPLE_TAG;
-		*msg++ = req->tag;
+		*msg++ = cmd->request->tag;
 		return 2;
 	}
 
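Callers of scsi_populate_tag_msg() are unaffected by the rewrite above: the helper still emits a two-byte SIMPLE TAG message for tagged commands and nothing for untagged ones, it simply keys off SCMD_TAGGED now. A hedged usage sketch (hypothetical driver code):

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>

static int example_build_msgout(struct scsi_cmnd *cmd, char *msgout)
{
	int len = 0;

	/* Appends MSG_SIMPLE_TAG plus the block-layer tag for tagged
	 * commands; contributes nothing for untagged ones. */
	len += scsi_populate_tag_msg(cmd, &msgout[len]);
	return len;
}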