mirror of https://gitee.com/openkylin/linux.git
SCSI misc on 20210228
This is a few driver updates (iscsi, mpt3sas) that were still in the
staging queue when the merge window opened (all committed on or before
8 Feb) and some small bug fixes which came in during the merge window
(all committed on 22 Feb).

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----

iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYDvKnSYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishXtMAQDsmEay
V/GLOBE3lvURgUz/AANItBVQ+RTbo+3r25Q7PAEA/zqxVW3NitlJPzLA/MkWQ7p6
gnfxL088xAtZr24sdHA=
=whGs
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull more SCSI updates from James Bottomley:
 "This is a few driver updates (iscsi, mpt3sas) that were still in the
  staging queue when the merge window opened (all committed on or
  before 8 Feb) and some small bug fixes which came in during the merge
  window (all committed on 22 Feb)"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (30 commits)
  scsi: hpsa: Correct dev cmds outstanding for retried cmds
  scsi: sd: Fix Opal support
  scsi: target: tcmu: Fix memory leak caused by wrong uio usage
  scsi: target: tcmu: Move some functions without code change
  scsi: sd: sd_zbc: Don't pass GFP_NOIO to kvcalloc
  scsi: aic7xxx: Remove unused function pointer typedef ahc_bus_suspend/resume_t
  scsi: bnx2fc: Fix Kconfig warning & CNIC build errors
  scsi: ufs: Fix a duplicate dev quirk number
  scsi: aic79xx: Fix spelling of version
  scsi: target: core: Prevent underflow for service actions
  scsi: target: core: Add cmd length set before cmd complete
  scsi: iscsi: Drop session lock in iscsi_session_chkready()
  scsi: qla4xxx: Use iscsi_is_session_online()
  scsi: libiscsi: Reset max/exp cmdsn during recovery
  scsi: iscsi_tcp: Fix shost can_queue initialization
  scsi: libiscsi: Add helper to calculate max SCSI cmds per session
  scsi: libiscsi: Fix iSCSI host workq destruction
  scsi: libiscsi: Fix iscsi_task use after free()
  scsi: libiscsi: Drop taskqueuelock
  scsi: libiscsi: Fix iscsi_prep_scsi_cmd_pdu() error handling
  ...
commit 0b311e34d5
@@ -1175,7 +1175,7 @@ struct ahd_softc {
 	uint8_t		  tqinfifonext;

 	/*
-	 * Cached verson of the hs_mailbox so we can avoid
+	 * Cached version of the hs_mailbox so we can avoid
	 * pausing the sequencer during mailbox updates.
	 */
 	uint8_t		  hs_mailbox;
@@ -896,8 +896,6 @@ union ahc_bus_softc {

 typedef void (*ahc_bus_intr_t)(struct ahc_softc *);
 typedef int (*ahc_bus_chip_init_t)(struct ahc_softc *);
-typedef int (*ahc_bus_suspend_t)(struct ahc_softc *);
-typedef int (*ahc_bus_resume_t)(struct ahc_softc *);
 typedef void ahc_callback_t (void *);

 struct ahc_softc {
@@ -5,6 +5,7 @@ config SCSI_BNX2X_FCOE
 	depends on (IPV6 || IPV6=n)
 	depends on LIBFC
 	depends on LIBFCOE
+	depends on MMU
 	select NETDEVICES
 	select ETHERNET
 	select NET_VENDOR_BROADCOM
@@ -1171,10 +1171,8 @@ static void bnx2i_cleanup_task(struct iscsi_task *task)
 		bnx2i_send_cmd_cleanup_req(hba, task->dd_data);

-		spin_unlock_bh(&conn->session->back_lock);
 		spin_unlock_bh(&conn->session->frwd_lock);
 		wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
 				msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
 		spin_lock_bh(&conn->session->frwd_lock);
-		spin_lock_bh(&conn->session->back_lock);
 	}
 	bnx2i_iscsi_unmap_sg_list(task->dd_data);
@@ -1151,7 +1151,10 @@ static void __enqueue_cmd_and_start_io(struct ctlr_info *h,
 {
 	dial_down_lockup_detection_during_fw_flash(h, c);
 	atomic_inc(&h->commands_outstanding);
-	if (c->device)
+	/*
+	 * Check to see if the command is being retried.
+	 */
+	if (c->device && !c->retry_pending)
 		atomic_inc(&c->device->commands_outstanding);

 	reply_queue = h->reply_map[raw_smp_processor_id()];
@@ -5567,7 +5570,8 @@ static inline void hpsa_cmd_partial_init(struct ctlr_info *h, int index,
 }

 static int hpsa_ioaccel_submit(struct ctlr_info *h,
-	struct CommandList *c, struct scsi_cmnd *cmd)
+	struct CommandList *c, struct scsi_cmnd *cmd,
+	bool retry)
 {
 	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata;
 	int rc = IO_ACCEL_INELIGIBLE;
@@ -5584,18 +5588,22 @@ static int hpsa_ioaccel_submit(struct ctlr_info *h,
 	cmd->host_scribble = (unsigned char *) c;

 	if (dev->offload_enabled) {
-		hpsa_cmd_init(h, c->cmdindex, c);
+		hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
 		c->cmd_type = CMD_SCSI;
 		c->scsi_cmd = cmd;
 		c->device = dev;
+		if (retry) /* Resubmit but do not increment device->commands_outstanding. */
+			c->retry_pending = true;
 		rc = hpsa_scsi_ioaccel_raid_map(h, c);
 		if (rc < 0)     /* scsi_dma_map failed. */
 			rc = SCSI_MLQUEUE_HOST_BUSY;
 	} else if (dev->hba_ioaccel_enabled) {
-		hpsa_cmd_init(h, c->cmdindex, c);
+		hpsa_cmd_init(h, c->cmdindex, c); /* Zeroes out all fields */
 		c->cmd_type = CMD_SCSI;
 		c->scsi_cmd = cmd;
 		c->device = dev;
+		if (retry) /* Resubmit but do not increment device->commands_outstanding. */
+			c->retry_pending = true;
 		rc = hpsa_scsi_ioaccel_direct_map(h, c);
 		if (rc < 0)     /* scsi_dma_map failed. */
 			rc = SCSI_MLQUEUE_HOST_BUSY;
@@ -5628,7 +5636,8 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)

 		if (c2->error_data.serv_response ==
 				IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL) {
-			rc = hpsa_ioaccel_submit(h, c, cmd);
+			/* Resubmit with the retry_pending flag set. */
+			rc = hpsa_ioaccel_submit(h, c, cmd, true);
 			if (rc == 0)
 				return;
 			if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -5644,6 +5653,15 @@ static void hpsa_command_resubmit_worker(struct work_struct *work)
 			}
 		}
 		hpsa_cmd_partial_init(c->h, c->cmdindex, c);
+		/*
+		 * Here we have not come in though queue_command, so we
+		 * can set the retry_pending flag to true for a driver initiated
+		 * retry attempt (I.E. not a SML retry).
+		 * I.E. We are submitting a driver initiated retry.
+		 * Note: hpsa_ciss_submit does not zero out the command fields like
+		 *       ioaccel submit does.
+		 */
+		c->retry_pending = true;
 		if (hpsa_ciss_submit(c->h, c, cmd, dev)) {
 			/*
 			 * If we get here, it means dma mapping failed. Try
@@ -5706,11 +5724,16 @@ static int hpsa_scsi_queue_command(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
 	/*
 	 * Call alternate submit routine for I/O accelerated commands.
 	 * Retries always go down the normal I/O path.
+	 * Note: If cmd->retries is non-zero, then this is a SML
+	 *       initiated retry and not a driver initiated retry.
+	 *       This command has been obtained from cmd_tagged_alloc
+	 *       and is therefore a brand-new command.
 	 */
 	if (likely(cmd->retries == 0 &&
 			!blk_rq_is_passthrough(cmd->request) &&
 			h->acciopath_status)) {
-		rc = hpsa_ioaccel_submit(h, c, cmd);
+		/* Submit with the retry_pending flag unset. */
+		rc = hpsa_ioaccel_submit(h, c, cmd, false);
 		if (rc == 0)
 			return 0;
 		if (rc == SCSI_MLQUEUE_HOST_BUSY) {
@@ -6105,6 +6128,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)
 * at init, and managed by cmd_tagged_alloc() and cmd_tagged_free() using the
 * block request tag as an index into a table of entries. cmd_tagged_free() is
 * the complement, although cmd_free() may be called instead.
+ * This function is only called for new requests from queue_command.
 */
 static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
					    struct scsi_cmnd *scmd)
@@ -6139,8 +6163,14 @@ static struct CommandList *cmd_tagged_alloc(struct ctlr_info *h,
 	}

 	atomic_inc(&c->refcount);

 	hpsa_cmd_partial_init(h, idx, c);

+	/*
+	 * This is a new command obtained from queue_command so
+	 * there have not been any driver initiated retry attempts.
+	 */
+	c->retry_pending = false;
+
 	return c;
 }

@@ -6208,6 +6238,13 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 	}
 	hpsa_cmd_partial_init(h, i, c);
 	c->device = NULL;
+
+	/*
+	 * cmd_alloc is for "internal" commands and they are never
+	 * retried.
+	 */
+	c->retry_pending = false;
+
 	return c;
 }

@@ -448,7 +448,7 @@ struct CommandList {
 	 */
 	struct hpsa_scsi_dev_t *phys_disk;

-	int abort_pending;
+	bool retry_pending;
 	struct hpsa_scsi_dev_t *device;
 	atomic_t refcount; /* Must be last to avoid memset in hpsa_cmd_init() */
 } __aligned(COMMANDLIST_ALIGNMENT);
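The hpsa hunks above all serve one invariant: device->commands_outstanding is incremented only for brand-new commands (retry_pending unset), while driver-initiated resubmits skip the increment so the per-device counter is not inflated by retries. Below is a minimal userspace sketch of that accounting idea; submit_cmd() and complete_cmd() are hypothetical stand-ins for the driver's queue/complete paths, not hpsa functions.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

struct dev_stats { atomic_int commands_outstanding; };
struct cmd { bool retry_pending; struct dev_stats *dev; };

/* Mirrors the idea in __enqueue_cmd_and_start_io(): count a command
 * only when it is not a driver-initiated retry, so a resubmission of
 * the same command is not counted twice. */
static void submit_cmd(struct cmd *c)
{
	if (c->dev && !c->retry_pending)
		atomic_fetch_add(&c->dev->commands_outstanding, 1);
}

/* Final completion decrements exactly once per counted submission. */
static void complete_cmd(struct cmd *c)
{
	if (c->dev)
		atomic_fetch_sub(&c->dev->commands_outstanding, 1);
}

int main(void)
{
	struct dev_stats d = { 0 };
	struct cmd c = { .retry_pending = false, .dev = &d };

	submit_cmd(&c);			/* new command: counted */
	c.retry_pending = true;
	submit_cmd(&c);			/* driver retry: not counted */
	complete_cmd(&c);		/* single final completion */
	assert(atomic_load(&d.commands_outstanding) == 0);
	return 0;
}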
@@ -1480,8 +1480,6 @@ static enum sci_status
 stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
					      u32 completion_code)
 {
-	enum sci_status status = SCI_SUCCESS;
-
 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
 		ireq->scu_status = SCU_TASK_DONE_GOOD;
@@ -1500,7 +1498,7 @@ stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq,
 		break;
 	}

-	return status;
+	return SCI_SUCCESS;
 }

 static enum sci_status
@@ -2152,8 +2150,6 @@ static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq
 static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code,
					    enum sci_base_request_states next)
 {
-	enum sci_status status = SCI_SUCCESS;
-
 	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
 	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
 		ireq->scu_status = SCU_TASK_DONE_GOOD;
@@ -2172,7 +2168,7 @@ static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 compl
 		break;
 	}

-	return status;
+	return SCI_SUCCESS;
 }

 static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq,
@@ -847,6 +847,7 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 	struct iscsi_session *session;
 	struct iscsi_sw_tcp_host *tcp_sw_host;
 	struct Scsi_Host *shost;
+	int rc;

 	if (ep) {
 		printk(KERN_ERR "iscsi_tcp: invalid ep %p.\n", ep);
@@ -864,6 +865,11 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 	shost->max_channel = 0;
 	shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

+	rc = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
+	if (rc < 0)
+		goto free_host;
+	shost->can_queue = rc;
+
 	if (iscsi_host_add(shost, NULL))
 		goto free_host;

@@ -878,7 +884,6 @@ iscsi_sw_tcp_session_create(struct iscsi_endpoint *ep, uint16_t cmds_max,
 	tcp_sw_host = iscsi_host_priv(shost);
 	tcp_sw_host->session = session;

-	shost->can_queue = session->scsi_cmds_max;
 	if (iscsi_tcp_r2tpool_alloc(session))
 		goto remove_session;
 	return cls_session;
@@ -981,7 +986,7 @@ static struct scsi_host_template iscsi_sw_tcp_sht = {
 	.name			= "iSCSI Initiator over TCP/IP",
 	.queuecommand		= iscsi_queuecommand,
 	.change_queue_depth	= scsi_change_queue_depth,
-	.can_queue		= ISCSI_DEF_XMIT_CMDS_MAX - 1,
+	.can_queue		= ISCSI_TOTAL_CMDS_MAX,
 	.sg_tablesize		= 4096,
 	.max_sectors		= 0xFFFF,
 	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
@@ -523,16 +523,6 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
 	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
 	task->state = state;

-	spin_lock_bh(&conn->taskqueuelock);
-	if (!list_empty(&task->running)) {
-		pr_debug_once("%s while task on list", __func__);
-		list_del_init(&task->running);
-	}
-	spin_unlock_bh(&conn->taskqueuelock);
-
-	if (conn->task == task)
-		conn->task = NULL;
-
 	if (READ_ONCE(conn->ping_task) == task)
 		WRITE_ONCE(conn->ping_task, NULL);

@@ -564,11 +554,41 @@ void iscsi_complete_scsi_task(struct iscsi_task *task,
 }
 EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);

+/*
+ * Must be called with back and frwd lock
+ */
+static bool cleanup_queued_task(struct iscsi_task *task)
+{
+	struct iscsi_conn *conn = task->conn;
+	bool early_complete = false;
+
+	/* Bad target might have completed task while it was still running */
+	if (task->state == ISCSI_TASK_COMPLETED)
+		early_complete = true;
+
+	if (!list_empty(&task->running)) {
+		list_del_init(&task->running);
+		/*
+		 * If it's on a list but still running, this could be from
+		 * a bad target sending a rsp early, cleanup from a TMF, or
+		 * session recovery.
+		 */
+		if (task->state == ISCSI_TASK_RUNNING ||
+		    task->state == ISCSI_TASK_COMPLETED)
+			__iscsi_put_task(task);
+	}
+
+	if (conn->task == task) {
+		conn->task = NULL;
+		__iscsi_put_task(task);
+	}
+
+	return early_complete;
+}
+
 /*
- * session back_lock must be held and if not called for a task that is
- * still pending or from the xmit thread, then xmit thread must
- * be suspended.
+ * session frwd lock must be held and if not called for a task that is still
+ * pending or from the xmit thread, then xmit thread must be suspended
 */
 static void fail_scsi_task(struct iscsi_task *task, int err)
 {
@@ -576,14 +596,11 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
 	struct scsi_cmnd *sc;
 	int state;

-	/*
-	 * if a command completes and we get a successful tmf response
-	 * we will hit this because the scsi eh abort code does not take
-	 * a ref to the task.
-	 */
-	sc = task->sc;
-	if (!sc)
+	spin_lock_bh(&conn->session->back_lock);
+	if (cleanup_queued_task(task)) {
+		spin_unlock_bh(&conn->session->back_lock);
 		return;
+	}

 	if (task->state == ISCSI_TASK_PENDING) {
 		/*
@@ -598,11 +615,9 @@ static void fail_scsi_task(struct iscsi_task *task, int err)
 	else
 		state = ISCSI_TASK_ABRT_TMF;

+	sc = task->sc;
 	sc->result = err << 16;
 	scsi_set_resid(sc, scsi_bufflen(sc));
-
-	/* regular RX path uses back_lock */
-	spin_lock_bh(&conn->session->back_lock);
 	iscsi_complete_task(task, state);
 	spin_unlock_bh(&conn->session->back_lock);
 }
@@ -748,9 +763,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		if (session->tt->xmit_task(task))
 			goto free_task;
 	} else {
-		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->mgmtqueue);
-		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}

@@ -1411,31 +1424,61 @@ static int iscsi_check_cmdsn_window_closed(struct iscsi_conn *conn)
 	return 0;
 }

-static int iscsi_xmit_task(struct iscsi_conn *conn)
+static int iscsi_xmit_task(struct iscsi_conn *conn, struct iscsi_task *task,
+			   bool was_requeue)
 {
-	struct iscsi_task *task = conn->task;
 	int rc;

-	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
-		return -ENODATA;
-
 	spin_lock_bh(&conn->session->back_lock);
-	if (conn->task == NULL) {
+
+	if (!conn->task) {
+		/* Take a ref so we can access it after xmit_task() */
+		__iscsi_get_task(task);
+	} else {
+		/* Already have a ref from when we failed to send it last call */
+		conn->task = NULL;
+	}
+
+	/*
+	 * If this was a requeue for a R2T we have an extra ref on the task in
+	 * case a bad target sends a cmd rsp before we have handled the task.
+	 */
+	if (was_requeue)
+		__iscsi_put_task(task);
+
+	/*
+	 * Do this after dropping the extra ref because if this was a requeue
+	 * it's removed from that list and cleanup_queued_task would miss it.
+	 */
+	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+		/*
+		 * Save the task and ref in case we weren't cleaning up this
+		 * task and get woken up again.
+		 */
+		conn->task = task;
 		spin_unlock_bh(&conn->session->back_lock);
 		return -ENODATA;
 	}
-	__iscsi_get_task(task);
 	spin_unlock_bh(&conn->session->back_lock);
+
 	spin_unlock_bh(&conn->session->frwd_lock);
 	rc = conn->session->tt->xmit_task(task);
 	spin_lock_bh(&conn->session->frwd_lock);
 	if (!rc) {
 		/* done with this task */
 		task->last_xfer = jiffies;
-		conn->task = NULL;
 	}
 	/* regular RX path uses back_lock */
 	spin_lock(&conn->session->back_lock);
+	if (rc && task->state == ISCSI_TASK_RUNNING) {
+		/*
+		 * get an extra ref that is released next time we access it
+		 * as conn->task above.
+		 */
+		__iscsi_get_task(task);
+		conn->task = task;
+	}
+
 	__iscsi_put_task(task);
 	spin_unlock(&conn->session->back_lock);
 	return rc;
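The rewritten iscsi_xmit_task() above follows a small ownership protocol: take a reference before dropping the session locks to transmit, park an extra reference in conn->task when the send must be retried, and drop exactly one reference on every exit path. The standalone sketch below models only that refcount balance; get_task/put_task/xmit_task here are hypothetical stand-ins, not the libiscsi API.

#include <stdatomic.h>
#include <stdio.h>

struct task { atomic_int refcount; };

static void get_task(struct task *t) { atomic_fetch_add(&t->refcount, 1); }
static void put_task(struct task *t) { atomic_fetch_sub(&t->refcount, 1); }

/* One ref is held across the (unlocked) transmit; a failed send parks
 * an extra ref in *conn_task that the next attempt consumes. */
static int xmit_task(struct task *t, struct task **conn_task, int send_fails)
{
	if (!*conn_task)
		get_task(t);		/* ref for this transmit attempt */
	else
		*conn_task = NULL;	/* reuse the ref parked last time */

	if (send_fails) {
		get_task(t);		/* park a ref for the retry */
		*conn_task = t;
	}
	put_task(t);			/* drop this attempt's ref */
	return send_fails ? -1 : 0;
}

int main(void)
{
	struct task t = { 1 };		/* base ref held by the session */
	struct task *conn_task = NULL;

	xmit_task(&t, &conn_task, 1);	/* failed send: ref stays parked */
	xmit_task(&t, &conn_task, 0);	/* retry succeeds: parked ref used */
	printf("refcount = %d\n", atomic_load(&t.refcount));	/* 1 */
	return 0;
}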
@@ -1445,9 +1488,7 @@ static int iscsi_xmit_task(struct iscsi_conn *conn)
 * iscsi_requeue_task - requeue task to run from session workqueue
 * @task: task to requeue
 *
- * LLDs that need to run a task from the session workqueue should call
- * this. The session frwd_lock must be held. This should only be called
- * by software drivers.
+ * Callers must have taken a ref to the task that is going to be requeued.
 */
 void iscsi_requeue_task(struct iscsi_task *task)
 {
@@ -1457,11 +1498,18 @@ void iscsi_requeue_task(struct iscsi_task *task)
 	 * this may be on the requeue list already if the xmit_task callout
 	 * is handling the r2ts while we are adding new ones
 	 */
-	spin_lock_bh(&conn->taskqueuelock);
-	if (list_empty(&task->running))
+	spin_lock_bh(&conn->session->frwd_lock);
+	if (list_empty(&task->running)) {
 		list_add_tail(&task->running, &conn->requeue);
-	spin_unlock_bh(&conn->taskqueuelock);
+	} else {
+		/*
+		 * Don't need the extra ref since it's already requeued and
+		 * has a ref.
+		 */
+		iscsi_put_task(task);
+	}
 	iscsi_conn_queue_work(conn);
+	spin_unlock_bh(&conn->session->frwd_lock);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);

@@ -1487,7 +1535,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 	}

 	if (conn->task) {
-		rc = iscsi_xmit_task(conn);
+		rc = iscsi_xmit_task(conn, conn->task, false);
 		if (rc)
 			goto done;
 	}
@@ -1497,54 +1545,41 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 	 * only have one nop-out as a ping from us and targets should not
 	 * overflow us with nop-ins
 	 */
-	spin_lock_bh(&conn->taskqueuelock);
 check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
-		conn->task = list_entry(conn->mgmtqueue.next,
-					struct iscsi_task, running);
-		list_del_init(&conn->task->running);
-		spin_unlock_bh(&conn->taskqueuelock);
-		if (iscsi_prep_mgmt_task(conn, conn->task)) {
+		task = list_entry(conn->mgmtqueue.next, struct iscsi_task,
+				  running);
+		list_del_init(&task->running);
+		if (iscsi_prep_mgmt_task(conn, task)) {
 			/* regular RX path uses back_lock */
 			spin_lock_bh(&conn->session->back_lock);
-			__iscsi_put_task(conn->task);
+			__iscsi_put_task(task);
 			spin_unlock_bh(&conn->session->back_lock);
-			conn->task = NULL;
-			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
-		rc = iscsi_xmit_task(conn);
+		rc = iscsi_xmit_task(conn, task, false);
 		if (rc)
 			goto done;
-		spin_lock_bh(&conn->taskqueuelock);
 	}

 	/* process pending command queue */
 	while (!list_empty(&conn->cmdqueue)) {
-		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
-					running);
-		list_del_init(&conn->task->running);
-		spin_unlock_bh(&conn->taskqueuelock);
+		task = list_entry(conn->cmdqueue.next, struct iscsi_task,
+				  running);
+		list_del_init(&task->running);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
-			fail_scsi_task(conn->task, DID_IMM_RETRY);
-			spin_lock_bh(&conn->taskqueuelock);
+			fail_scsi_task(task, DID_IMM_RETRY);
 			continue;
 		}
-		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
+		rc = iscsi_prep_scsi_cmd_pdu(task);
 		if (rc) {
-			if (rc == -ENOMEM || rc == -EACCES) {
-				spin_lock_bh(&conn->taskqueuelock);
-				list_add_tail(&conn->task->running,
-					      &conn->cmdqueue);
-				conn->task = NULL;
-				spin_unlock_bh(&conn->taskqueuelock);
-				goto done;
-			} else
-				fail_scsi_task(conn->task, DID_ABORT);
-			spin_lock_bh(&conn->taskqueuelock);
+			if (rc == -ENOMEM || rc == -EACCES)
+				fail_scsi_task(task, DID_IMM_RETRY);
+			else
+				fail_scsi_task(task, DID_ABORT);
 			continue;
 		}
-		rc = iscsi_xmit_task(conn);
+		rc = iscsi_xmit_task(conn, task, false);
 		if (rc)
 			goto done;
 		/*
@@ -1552,7 +1587,6 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 		 * we need to check the mgmt queue for nops that need to
 		 * be sent to aviod starvation
 		 */
-		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
@@ -1566,21 +1600,17 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)

 		task = list_entry(conn->requeue.next, struct iscsi_task,
				  running);

 		if (iscsi_check_tmf_restrictions(task, ISCSI_OP_SCSI_DATA_OUT))
 			break;

-		conn->task = task;
-		list_del_init(&conn->task->running);
-		conn->task->state = ISCSI_TASK_RUNNING;
-		spin_unlock_bh(&conn->taskqueuelock);
-		rc = iscsi_xmit_task(conn);
+		list_del_init(&task->running);
+		rc = iscsi_xmit_task(conn, task, true);
 		if (rc)
 			goto done;
-		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
-	spin_unlock_bh(&conn->taskqueuelock);
 	spin_unlock_bh(&conn->session->frwd_lock);
 	return -ENODATA;

@@ -1746,9 +1776,7 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 			goto prepd_reject;
 		}
 	} else {
-		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->cmdqueue);
-		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}

@@ -1855,27 +1883,39 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
 }

 /*
- * Fail commands. session lock held and recv side suspended and xmit
- * thread flushed
+ * Fail commands. session frwd lock held and xmit thread flushed.
 */
 static void fail_scsi_tasks(struct iscsi_conn *conn, u64 lun, int error)
 {
+	struct iscsi_session *session = conn->session;
 	struct iscsi_task *task;
 	int i;

-	for (i = 0; i < conn->session->cmds_max; i++) {
-		task = conn->session->cmds[i];
+	spin_lock_bh(&session->back_lock);
+	for (i = 0; i < session->cmds_max; i++) {
+		task = session->cmds[i];
 		if (!task->sc || task->state == ISCSI_TASK_FREE)
 			continue;

 		if (lun != -1 && lun != task->sc->device->lun)
 			continue;

-		ISCSI_DBG_SESSION(conn->session,
+		__iscsi_get_task(task);
+		spin_unlock_bh(&session->back_lock);
+
+		ISCSI_DBG_SESSION(session,
				  "failing sc %p itt 0x%x state %d\n",
				  task->sc, task->itt, task->state);
 		fail_scsi_task(task, error);
+
+		spin_unlock_bh(&session->frwd_lock);
+		iscsi_put_task(task);
+		spin_lock_bh(&session->frwd_lock);
+
+		spin_lock_bh(&session->back_lock);
 	}
+
+	spin_unlock_bh(&session->back_lock);
 }

 /**
@@ -1953,6 +1993,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 	ISCSI_DBG_EH(session, "scsi cmd %p timedout\n", sc);

 	spin_lock_bh(&session->frwd_lock);
+	spin_lock(&session->back_lock);
 	task = (struct iscsi_task *)sc->SCp.ptr;
 	if (!task) {
 		/*
@@ -1960,8 +2001,11 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 		 * so let timeout code complete it now.
 		 */
 		rc = BLK_EH_DONE;
+		spin_unlock(&session->back_lock);
 		goto done;
 	}
+	__iscsi_get_task(task);
+	spin_unlock(&session->back_lock);

 	if (session->state != ISCSI_STATE_LOGGED_IN) {
 		/*
@@ -2020,6 +2064,7 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 		goto done;
 	}

+	spin_lock(&session->back_lock);
 	for (i = 0; i < conn->session->cmds_max; i++) {
 		running_task = conn->session->cmds[i];
 		if (!running_task->sc || running_task == task ||
@@ -2052,10 +2097,12 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
				     "last xfer %lu/%lu. Last check %lu.\n",
				     task->last_xfer, running_task->last_xfer,
				     task->last_timeout);
+			spin_unlock(&session->back_lock);
 			rc = BLK_EH_RESET_TIMER;
 			goto done;
 		}
 	}
+	spin_unlock(&session->back_lock);

 	/* Assumes nop timeout is shorter than scsi cmd timeout */
 	if (task->have_checked_conn)
@@ -2077,9 +2124,12 @@ enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *sc)
 		rc = BLK_EH_RESET_TIMER;

 done:
-	if (task)
-		task->last_timeout = jiffies;
 	spin_unlock_bh(&session->frwd_lock);
+
+	if (task) {
+		task->last_timeout = jiffies;
+		iscsi_put_task(task);
+	}
 	ISCSI_DBG_EH(session, "return %s\n", rc == BLK_EH_RESET_TIMER ?
		     "timer reset" : "shutdown or nh");
 	return rc;
@@ -2187,15 +2237,20 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 	conn->eh_abort_cnt++;
 	age = session->age;

+	spin_lock(&session->back_lock);
 	task = (struct iscsi_task *)sc->SCp.ptr;
-	ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n",
-		     sc, task->itt);
-
-	/* task completed before time out */
-	if (!task->sc) {
+	if (!task || !task->sc) {
+		/* task completed before time out */
 		ISCSI_DBG_EH(session, "sc completed while abort in progress\n");
-		goto success;
+
+		spin_unlock(&session->back_lock);
+		spin_unlock_bh(&session->frwd_lock);
+		mutex_unlock(&session->eh_mutex);
+		return SUCCESS;
 	}
+	ISCSI_DBG_EH(session, "aborting [sc %p itt 0x%x]\n", sc, task->itt);
+	__iscsi_get_task(task);
+	spin_unlock(&session->back_lock);

 	if (task->state == ISCSI_TASK_PENDING) {
 		fail_scsi_task(task, DID_ABORT);
@@ -2257,6 +2312,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 success_unlocked:
 	ISCSI_DBG_EH(session, "abort success [sc %p itt 0x%x]\n",
		     sc, task->itt);
+	iscsi_put_task(task);
 	mutex_unlock(&session->eh_mutex);
 	return SUCCESS;

@@ -2265,6 +2321,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
 failed_unlocked:
 	ISCSI_DBG_EH(session, "abort failed [sc %p itt 0x%x]\n", sc,
		     task ? task->itt : 0);
+	iscsi_put_task(task);
 	mutex_unlock(&session->eh_mutex);
 	return FAILED;
 }
@@ -2591,6 +2648,56 @@ void iscsi_pool_free(struct iscsi_pool *q)
 }
 EXPORT_SYMBOL_GPL(iscsi_pool_free);

+int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
+				 uint16_t requested_cmds_max)
+{
+	int scsi_cmds, total_cmds = requested_cmds_max;
+
+check:
+	if (!total_cmds)
+		total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
+	/*
+	 * The iscsi layer needs some tasks for nop handling and tmfs,
+	 * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
+	 * + 1 command for scsi IO.
+	 */
+	if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+		printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of two that is at least %d.\n",
+		       total_cmds, ISCSI_TOTAL_CMDS_MIN);
+		return -EINVAL;
+	}
+
+	if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
+		printk(KERN_INFO "iscsi: invalid max cmds of %d. Must be a power of 2 less than or equal to %d. Using %d.\n",
+		       requested_cmds_max, ISCSI_TOTAL_CMDS_MAX,
+		       ISCSI_TOTAL_CMDS_MAX);
+		total_cmds = ISCSI_TOTAL_CMDS_MAX;
+	}
+
+	if (!is_power_of_2(total_cmds)) {
+		total_cmds = rounddown_pow_of_two(total_cmds);
+		if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
+			printk(KERN_ERR "iscsi: invalid max cmds of %d. Must be a power of 2 greater than %d.\n", requested_cmds_max, ISCSI_TOTAL_CMDS_MIN);
+			return -EINVAL;
+		}
+
+		printk(KERN_INFO "iscsi: invalid max cmds %d. Must be a power of 2. Rounding max cmds down to %d.\n",
+		       requested_cmds_max, total_cmds);
+	}
+
+	scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;
+	if (shost->can_queue && scsi_cmds > shost->can_queue) {
+		total_cmds = shost->can_queue;
+
+		printk(KERN_INFO "iscsi: requested max cmds %u is higher than driver limit. Using driver limit %u\n",
+		       requested_cmds_max, shost->can_queue);
+		goto check;
+	}
+
+	return scsi_cmds;
+}
+EXPORT_SYMBOL_GPL(iscsi_host_get_max_scsi_cmds);
+
 /**
 * iscsi_host_add - add host to system
 * @shost: scsi host
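A compact way to see what the new iscsi_host_get_max_scsi_cmds() helper enforces: the total tag count must be a power of two between a minimum and a maximum, a fixed number of tags is reserved off the top for management tasks (nops, TMFs), and the remainder is what can_queue may carry. The standalone sketch below mirrors that rule; the constant values are illustrative stand-ins, not the kernel's ISCSI_* definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins; the kernel defines its own values. */
#define DEF_XMIT_CMDS_MAX 64
#define MGMT_CMDS_MAX     15
#define TOTAL_CMDS_MIN    16
#define TOTAL_CMDS_MAX    4096

static uint32_t round_down_pow2(uint32_t n)
{
	uint32_t p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

/* Returns the number of tags usable for SCSI I/O, or -1 on bad input. */
static int max_scsi_cmds(uint16_t requested)
{
	uint32_t total = requested ? requested : DEF_XMIT_CMDS_MAX;

	if (total < TOTAL_CMDS_MIN)
		return -1;
	if (total > TOTAL_CMDS_MAX)
		total = TOTAL_CMDS_MAX;
	if (total & (total - 1))		/* not a power of two */
		total = round_down_pow2(total);
	if (total < TOTAL_CMDS_MIN)
		return -1;
	return (int)total - MGMT_CMDS_MAX;	/* reserve nop/tmf tasks */
}

int main(void)
{
	printf("%d\n", max_scsi_cmds(0));	/* default 64 -> 49 */
	printf("%d\n", max_scsi_cmds(100));	/* rounds down to 64 -> 49 */
	return 0;
}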
@@ -2681,8 +2788,6 @@ void iscsi_host_remove(struct Scsi_Host *shost)
 		flush_signals(current);

 	scsi_remove_host(shost);
-	if (ihost->workq)
-		destroy_workqueue(ihost->workq);
 }
 EXPORT_SYMBOL_GPL(iscsi_host_remove);

@@ -2690,6 +2795,9 @@ void iscsi_host_free(struct Scsi_Host *shost)
 {
 	struct iscsi_host *ihost = shost_priv(shost);

+	if (ihost->workq)
+		destroy_workqueue(ihost->workq);
+
 	kfree(ihost->netdev);
 	kfree(ihost->hwaddress);
 	kfree(ihost->initiatorname);
@@ -2743,7 +2851,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	struct iscsi_host *ihost = shost_priv(shost);
 	struct iscsi_session *session;
 	struct iscsi_cls_session *cls_session;
-	int cmd_i, scsi_cmds, total_cmds = cmds_max;
+	int cmd_i, scsi_cmds;
 	unsigned long flags;

 	spin_lock_irqsave(&ihost->lock, flags);
@@ -2754,37 +2862,9 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	ihost->num_sessions++;
 	spin_unlock_irqrestore(&ihost->lock, flags);

-	if (!total_cmds)
-		total_cmds = ISCSI_DEF_XMIT_CMDS_MAX;
-	/*
-	 * The iscsi layer needs some tasks for nop handling and tmfs,
-	 * so the cmds_max must at least be greater than ISCSI_MGMT_CMDS_MAX
-	 * + 1 command for scsi IO.
-	 */
-	if (total_cmds < ISCSI_TOTAL_CMDS_MIN) {
-		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
-		       "must be a power of two that is at least %d.\n",
-		       total_cmds, ISCSI_TOTAL_CMDS_MIN);
+	scsi_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max);
+	if (scsi_cmds < 0)
 		goto dec_session_count;
-	}
-
-	if (total_cmds > ISCSI_TOTAL_CMDS_MAX) {
-		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
-		       "must be a power of 2 less than or equal to %d.\n",
-		       cmds_max, ISCSI_TOTAL_CMDS_MAX);
-		total_cmds = ISCSI_TOTAL_CMDS_MAX;
-	}
-
-	if (!is_power_of_2(total_cmds)) {
-		printk(KERN_ERR "iscsi: invalid can_queue of %d. can_queue "
-		       "must be a power of 2.\n", total_cmds);
-		total_cmds = rounddown_pow_of_two(total_cmds);
-		if (total_cmds < ISCSI_TOTAL_CMDS_MIN)
-			goto dec_session_count;
-		printk(KERN_INFO "iscsi: Rounding can_queue to %d.\n",
-		       total_cmds);
-	}
-	scsi_cmds = total_cmds - ISCSI_MGMT_CMDS_MAX;

 	cls_session = iscsi_alloc_session(shost, iscsit,
					  sizeof(struct iscsi_session) +
@@ -2800,7 +2880,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
 	session->lu_reset_timeout = 15;
 	session->abort_timeout = 10;
 	session->scsi_cmds_max = scsi_cmds;
-	session->cmds_max = total_cmds;
+	session->cmds_max = scsi_cmds + ISCSI_MGMT_CMDS_MAX;
 	session->queued_cmdsn = session->cmdsn = initial_cmdsn;
 	session->exp_cmdsn = initial_cmdsn + 1;
 	session->max_cmdsn = initial_cmdsn + 1;
@@ -2919,7 +2999,6 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	INIT_LIST_HEAD(&conn->mgmtqueue);
 	INIT_LIST_HEAD(&conn->cmdqueue);
 	INIT_LIST_HEAD(&conn->requeue);
-	spin_lock_init(&conn->taskqueuelock);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);

 	/* allocate login_task used for the login/text sequences */
@@ -3085,10 +3164,16 @@ fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
 		ISCSI_DBG_SESSION(conn->session,
				  "failing mgmt itt 0x%x state %d\n",
				  task->itt, task->state);
+
+		spin_lock_bh(&session->back_lock);
+		if (cleanup_queued_task(task)) {
+			spin_unlock_bh(&session->back_lock);
+			continue;
+		}
+
 		state = ISCSI_TASK_ABRT_SESS_RECOV;
 		if (task->state == ISCSI_TASK_PENDING)
 			state = ISCSI_TASK_COMPLETED;
-		spin_lock_bh(&session->back_lock);
 		iscsi_complete_task(task, state);
 		spin_unlock_bh(&session->back_lock);
 	}
@@ -3188,6 +3273,13 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
 	session->leadconn = conn;
 	spin_unlock_bh(&session->frwd_lock);

+	/*
+	 * The target could have reduced it's window size between logins, so
+	 * we have to reset max/exp cmdsn so we can see the new values.
+	 */
+	spin_lock_bh(&session->back_lock);
+	session->max_cmdsn = session->exp_cmdsn = session->cmdsn + 1;
+	spin_unlock_bh(&session->back_lock);
 	/*
 	 * Unblock xmitworker(), Login Phase will pass through.
 	 */
@@ -524,48 +524,79 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
 /**
 * iscsi_tcp_r2t_rsp - iSCSI R2T Response processing
 * @conn: iscsi connection
- * @task: scsi command task
+ * @hdr: PDU header
 */
-static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
+static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 {
 	struct iscsi_session *session = conn->session;
-	struct iscsi_tcp_task *tcp_task = task->dd_data;
-	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
-	struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+	struct iscsi_tcp_task *tcp_task;
+	struct iscsi_tcp_conn *tcp_conn;
+	struct iscsi_r2t_rsp *rhdr;
 	struct iscsi_r2t_info *r2t;
-	int r2tsn = be32_to_cpu(rhdr->r2tsn);
+	struct iscsi_task *task;
 	u32 data_length;
 	u32 data_offset;
+	int r2tsn;
 	int rc;

+	spin_lock(&session->back_lock);
+	task = iscsi_itt_to_ctask(conn, hdr->itt);
+	if (!task) {
+		spin_unlock(&session->back_lock);
+		return ISCSI_ERR_BAD_ITT;
+	} else if (task->sc->sc_data_direction != DMA_TO_DEVICE) {
+		spin_unlock(&session->back_lock);
+		return ISCSI_ERR_PROTO;
+	}
+	/*
+	 * A bad target might complete the cmd before we have handled R2Ts
+	 * so get a ref to the task that will be dropped in the xmit path.
+	 */
+	if (task->state != ISCSI_TASK_RUNNING) {
+		spin_unlock(&session->back_lock);
+		/* Let the path that got the early rsp complete it */
+		return 0;
+	}
+	task->last_xfer = jiffies;
+	__iscsi_get_task(task);
+
+	tcp_conn = conn->dd_data;
+	rhdr = (struct iscsi_r2t_rsp *)tcp_conn->in.hdr;
+	/* fill-in new R2T associated with the task */
+	iscsi_update_cmdsn(session, (struct iscsi_nopin *)rhdr);
+	spin_unlock(&session->back_lock);
+
 	if (tcp_conn->in.datalen) {
 		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2t with datalen %d\n",
				  tcp_conn->in.datalen);
-		return ISCSI_ERR_DATALEN;
+		rc = ISCSI_ERR_DATALEN;
+		goto put_task;
 	}

+	tcp_task = task->dd_data;
+	r2tsn = be32_to_cpu(rhdr->r2tsn);
 	if (tcp_task->exp_datasn != r2tsn){
 		ISCSI_DBG_TCP(conn, "task->exp_datasn(%d) != rhdr->r2tsn(%d)\n",
			      tcp_task->exp_datasn, r2tsn);
-		return ISCSI_ERR_R2TSN;
+		rc = ISCSI_ERR_R2TSN;
+		goto put_task;
 	}

-	/* fill-in new R2T associated with the task */
-	iscsi_update_cmdsn(session, (struct iscsi_nopin*)rhdr);
-
-	if (!task->sc || session->state != ISCSI_STATE_LOGGED_IN) {
+	if (session->state != ISCSI_STATE_LOGGED_IN) {
 		iscsi_conn_printk(KERN_INFO, conn,
				  "dropping R2T itt %d in recovery.\n",
				  task->itt);
-		return 0;
+		rc = 0;
+		goto put_task;
 	}

 	data_length = be32_to_cpu(rhdr->data_length);
 	if (data_length == 0) {
 		iscsi_conn_printk(KERN_ERR, conn,
				  "invalid R2T with zero data len\n");
-		return ISCSI_ERR_DATALEN;
+		rc = ISCSI_ERR_DATALEN;
+		goto put_task;
 	}

 	if (data_length > session->max_burst)
@@ -579,7 +610,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
				  "invalid R2T with data len %u at offset %u "
				  "and total length %d\n", data_length,
				  data_offset, task->sc->sdb.length);
-		return ISCSI_ERR_DATALEN;
+		rc = ISCSI_ERR_DATALEN;
+		goto put_task;
 	}

 	spin_lock(&tcp_task->pool2queue);
@@ -589,7 +621,8 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)
				  "Target has sent more R2Ts than it "
				  "negotiated for or driver has leaked.\n");
 		spin_unlock(&tcp_task->pool2queue);
-		return ISCSI_ERR_PROTO;
+		rc = ISCSI_ERR_PROTO;
+		goto put_task;
 	}

 	r2t->exp_statsn = rhdr->statsn;
@@ -607,6 +640,10 @@ static int iscsi_tcp_r2t_rsp(struct iscsi_conn *conn, struct iscsi_task *task)

 	iscsi_requeue_task(task);
 	return 0;
+
+put_task:
+	iscsi_put_task(task);
+	return rc;
 }

 /*
@@ -730,20 +767,11 @@ iscsi_tcp_hdr_dissect(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
 		rc = iscsi_complete_pdu(conn, hdr, NULL, 0);
 		break;
 	case ISCSI_OP_R2T:
-		spin_lock(&conn->session->back_lock);
-		task = iscsi_itt_to_ctask(conn, hdr->itt);
-		spin_unlock(&conn->session->back_lock);
-		if (!task)
-			rc = ISCSI_ERR_BAD_ITT;
-		else if (ahslen)
+		if (ahslen) {
 			rc = ISCSI_ERR_AHSLEN;
-		else if (task->sc->sc_data_direction == DMA_TO_DEVICE) {
-			task->last_xfer = jiffies;
-			spin_lock(&conn->session->frwd_lock);
-			rc = iscsi_tcp_r2t_rsp(conn, task);
-			spin_unlock(&conn->session->frwd_lock);
-		} else
-			rc = ISCSI_ERR_PROTO;
-		break;
+			break;
+		}
+		rc = iscsi_tcp_r2t_rsp(conn, hdr);
+		break;
 	case ISCSI_OP_LOGIN_RSP:
 	case ISCSI_OP_TEXT_RSP:
@@ -3648,25 +3648,16 @@ _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
		    base_mod64(atomic64_add_return(1,
		    &ioc->total_io_cnt), ioc->reply_queue_count) : 0;

+	if (scmd && ioc->shost->nr_hw_queues > 1) {
+		u32 tag = blk_mq_unique_tag(scmd->request);
+
+		return blk_mq_unique_tag_to_hwq(tag) +
+		    ioc->high_iops_queues;
+	}
+
 	return ioc->cpu_msix_table[raw_smp_processor_id()];
 }

-/**
- * _base_sdev_nr_inflight_request -get number of inflight requests
- * of a request queue.
- * @q: request_queue object
- *
- * returns number of inflight request of a request queue.
- */
-inline unsigned long
-_base_sdev_nr_inflight_request(struct request_queue *q)
-{
-	struct blk_mq_hw_ctx *hctx = q->queue_hw_ctx[0];
-
-	return atomic_read(&hctx->nr_active);
-}
-
-
 /**
 * _base_get_high_iops_msix_index - get the msix index of
 * high iops queues
@@ -3686,7 +3677,8 @@ _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
	 * reply queues in terms of batch count 16 when outstanding
	 * IOs on the target device is >=8.
	 */
-	if (_base_sdev_nr_inflight_request(scmd->device->request_queue) >
+
+	if (atomic_read(&scmd->device->device_busy) >
	    MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
		return base_mod64((
		    atomic64_add_return(1, &ioc->high_iops_outstanding) /
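The replacement check keeps the same policy as before: once a device has more outstanding requests than MPT3SAS_DEVICE_HIGH_IOPS_DEPTH, submissions are spread over the high-iops reply queues in batches, driven by one shared atomic counter. A sketch of that batch distribution follows; the two constants are stand-ins mirroring the driver's batch-of-16 behavior, not guaranteed to match its exact values.

#include <stdint.h>
#include <stdio.h>

#define HIGH_IOPS_REPLY_QUEUES 8	/* stand-in queue count */
#define HIGH_IOPS_BATCH_COUNT  16	/* stand-in batch size */

/* Every 16 consecutive submissions land on the same reply queue, then
 * the next batch moves to the following queue, round robin. */
static int high_iops_msix_index(uint64_t *counter)
{
	uint64_t n = (*counter)++;

	return (int)((n / HIGH_IOPS_BATCH_COUNT) % HIGH_IOPS_REPLY_QUEUES);
}

int main(void)
{
	uint64_t counter = 0;

	for (int i = 0; i < 40; i++)
		printf("io %2d -> queue %d\n", i,
		       high_iops_msix_index(&counter));
	return 0;
}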
@@ -3739,8 +3731,23 @@ mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
	struct scsi_cmnd *scmd)
 {
 	struct scsiio_tracker *request = scsi_cmd_priv(scmd);
-	unsigned int tag = scmd->request->tag;
 	u16 smid;
+	u32 tag, unique_tag;
+
+	unique_tag = blk_mq_unique_tag(scmd->request);
+	tag = blk_mq_unique_tag_to_tag(unique_tag);
+
+	/*
+	 * Store hw queue number corresponding to the tag.
+	 * This hw queue number is used later to determine
+	 * the unique_tag using the logic below. This unique_tag
+	 * is used to retrieve the scmd pointer corresponding
+	 * to tag using scsi_host_find_tag() API.
+	 *
+	 * tag = smid - 1;
+	 * unique_tag = ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;
+	 */
+	ioc->io_queue_num[tag] = blk_mq_unique_tag_to_hwq(unique_tag);

 	smid = tag + 1;
 	request->cb_idx = cb_idx;
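The comment in that hunk documents the whole scheme: the hw queue number saved at submission time is later combined with the per-command tag (smid - 1) so that scsi_host_find_tag() can locate the command again. A minimal userspace sketch of the encode/decode, assuming the kernel's 16-bit BLK_MQ_UNIQUE_TAG_BITS split and using a plain array as a stand-in for ioc->io_queue_num:

#include <stdint.h>
#include <stdio.h>

#define BLK_MQ_UNIQUE_TAG_BITS 16

static uint16_t io_queue_num[64];	/* stand-in for ioc->io_queue_num */

static uint32_t make_unique_tag(uint16_t smid)
{
	uint32_t tag = smid - 1;	/* tag = smid - 1 */

	return ((uint32_t)io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS) | tag;
}

int main(void)
{
	io_queue_num[4] = 3;	/* command with smid 5 was queued on hwq 3 */
	uint32_t ut = make_unique_tag(5);

	printf("hwq=%u tag=%u\n", ut >> BLK_MQ_UNIQUE_TAG_BITS,
	       ut & ((1u << BLK_MQ_UNIQUE_TAG_BITS) - 1));
	return 0;
}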
@@ -3831,6 +3838,7 @@ mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)

		mpt3sas_base_clear_st(ioc, st);
		_base_recovery_check(ioc);
+		ioc->io_queue_num[smid - 1] = 0;
		return;
	}

@@ -5362,6 +5370,9 @@ _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
		kfree(ioc->chain_lookup);
		ioc->chain_lookup = NULL;
	}
+
+	kfree(ioc->io_queue_num);
+	ioc->io_queue_num = NULL;
 }

 /**
@@ -5641,7 +5652,8 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
	reply_post_free_sz = ioc->reply_post_queue_depth *
	    sizeof(Mpi2DefaultReplyDescriptor_t);
	rdpq_sz = reply_post_free_sz * RDPQ_MAX_INDEX_IN_ONE_CHUNK;
-	if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+	if ((_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+	    || (ioc->reply_queue_count < RDPQ_MAX_INDEX_IN_ONE_CHUNK))
		rdpq_sz = reply_post_free_sz * ioc->reply_queue_count;
	ret = base_alloc_rdpq_dma_pool(ioc, rdpq_sz);
	if (ret == -EAGAIN) {
@@ -5772,6 +5784,11 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
	ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
		 ioc->internal,
		 ioc->internal_depth, ioc->internal_smid));
+
+	ioc->io_queue_num = kcalloc(ioc->scsiio_depth,
+	    sizeof(u16), GFP_KERNEL);
+	if (!ioc->io_queue_num)
+		goto out;
	/*
	 * The number of NVMe page sized blocks needed is:
	 *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
@@ -8174,8 +8191,11 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT ||
		    (ioc_state & MPI2_IOC_STATE_MASK) ==
-		    MPI2_IOC_STATE_COREDUMP)
+		    MPI2_IOC_STATE_COREDUMP) {
			is_fault = 1;
+			ioc->htb_rel.trigger_info_dwords[1] =
+			    (ioc_state & MPI2_DOORBELL_DATA_MASK);
+		}
	}
	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
@@ -77,8 +77,8 @@
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR "Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION		"36.100.00.00"
-#define MPT3SAS_MAJOR_VERSION		36
+#define MPT3SAS_DRIVER_VERSION		"37.100.00.00"
+#define MPT3SAS_MAJOR_VERSION		37
 #define MPT3SAS_MINOR_VERSION		100
 #define MPT3SAS_BUILD_VERSION		0
 #define MPT3SAS_RELEASE_VERSION	00
@@ -1073,6 +1073,50 @@ struct hba_port {

 #define MULTIPATH_DISABLED_PORT_ID	0xFF

+/**
+ * struct htb_rel_query - diagnostic buffer release reason
+ * @unique_id - unique id associated with this buffer.
+ * @buffer_rel_condition - Release condition ioctl/sysfs/reset
+ * @reserved
+ * @trigger_type - Master/Event/scsi/MPI
+ * @trigger_info_dwords - Data Correspondig to trigger type
+ */
+struct htb_rel_query {
+	u16	buffer_rel_condition;
+	u16	reserved;
+	u32	trigger_type;
+	u32	trigger_info_dwords[2];
+};
+
+/* Buffer_rel_condition bit fields */
+
+/* Bit 0 - Diag Buffer not Released */
+#define MPT3_DIAG_BUFFER_NOT_RELEASED	(0x00)
+/* Bit 0 - Diag Buffer Released */
+#define MPT3_DIAG_BUFFER_RELEASED	(0x01)
+
+/*
+ * Bit 1 - Diag Buffer Released by IOCTL,
+ * This bit is valid only if Bit 0 is one
+ */
+#define MPT3_DIAG_BUFFER_REL_IOCTL	(0x02 | MPT3_DIAG_BUFFER_RELEASED)
+
+/*
+ * Bit 2 - Diag Buffer Released by Trigger,
+ * This bit is valid only if Bit 0 is one
+ */
+#define MPT3_DIAG_BUFFER_REL_TRIGGER	(0x04 | MPT3_DIAG_BUFFER_RELEASED)
+
+/*
+ * Bit 3 - Diag Buffer Released by SysFs,
+ * This bit is valid only if Bit 0 is one
+ */
+#define MPT3_DIAG_BUFFER_REL_SYSFS	(0x08 | MPT3_DIAG_BUFFER_RELEASED)
+
+/* DIAG RESET Master trigger flags */
+#define MPT_DIAG_RESET_ISSUED_BY_DRIVER 0x00000000
+#define MPT_DIAG_RESET_ISSUED_BY_USER	0x00000001
+
 typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
 /**
 * struct MPT3SAS_ADAPTER - per adapter struct
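The release-condition word added above is a small bit set: bit 0 says the buffer was released at all, and bits 1-3 encode who released it, which is why each MPT3_DIAG_BUFFER_REL_* value ORs in MPT3_DIAG_BUFFER_RELEASED. A tiny standalone decoder sketch of that encoding (macro names shortened for brevity):

#include <stdint.h>
#include <stdio.h>

#define RELEASED	0x01		/* bit 0: buffer released */
#define REL_IOCTL	(0x02 | RELEASED)
#define REL_TRIGGER	(0x04 | RELEASED)
#define REL_SYSFS	(0x08 | RELEASED)

static const char *rel_reason(uint16_t cond)
{
	if (!(cond & RELEASED))
		return "not released";
	if (cond & 0x02)
		return "released by ioctl";
	if (cond & 0x04)
		return "released by trigger";
	if (cond & 0x08)
		return "released by sysfs";
	return "released (reason unknown)";
}

int main(void)
{
	printf("%s\n", rel_reason(REL_TRIGGER));	/* released by trigger */
	printf("%s\n", rel_reason(0));			/* not released */
	return 0;
}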
@@ -1439,6 +1483,7 @@ struct MPT3SAS_ADAPTER {
	spinlock_t	scsi_lookup_lock;
	int		pending_io_count;
	wait_queue_head_t reset_wq;
+	u16		*io_queue_num;

	/* PCIe SGL */
	struct dma_pool *pcie_sgl_dma_pool;
@@ -1529,6 +1574,8 @@ struct MPT3SAS_ADAPTER {
	u32		diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
	u32		ring_buffer_offset;
	u32		ring_buffer_sz;
+	struct htb_rel_query htb_rel;
+	u8		reset_from_user;
	u8		is_warpdrive;
	u8		is_mcpu_endpoint;
	u8		hide_ir_msg;
@@ -1565,6 +1612,7 @@ struct mpt3sas_debugfs_buffer {
 };

 #define MPT_DRV_SUPPORT_BITMAP_MEMMOVE	0x00000001
+#define MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY	0x00000002

 typedef u8 (*MPT_CALLBACK)(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
	u32 reply);
@@ -479,6 +479,8 @@ void mpt3sas_ctl_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
			ioc_info(ioc,
			    "%s: Releasing the trace buffer due to adapter reset.",
			    __func__);
+			ioc->htb_rel.buffer_rel_condition =
+			    MPT3_DIAG_BUFFER_REL_TRIGGER;
			mpt3sas_send_diag_release(ioc, i, &issue_reset);
		}
	}
@@ -1334,6 +1336,7 @@ _ctl_do_reset(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
	dctlprintk(ioc, ioc_info(ioc, "%s: enter\n",
				 __func__));

+	ioc->reset_from_user = 1;
	retval = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
	ioc_info(ioc,
	    "Ioctl: host reset: %s\n", ((!retval) ? "SUCCESS" : "FAILED"));
@@ -1687,6 +1690,9 @@ _ctl_diag_register_2(struct MPT3SAS_ADAPTER *ioc,
	request_data = ioc->diag_buffer[buffer_type];
	request_data_sz = diag_register->requested_buffer_size;
	ioc->unique_id[buffer_type] = diag_register->unique_id;
+	/* Reset ioc variables used for additional query commands */
+	ioc->reset_from_user = 0;
+	memset(&ioc->htb_rel, 0, sizeof(struct htb_rel_query));
	ioc->diag_buffer_status[buffer_type] &=
	    MPT3_DIAG_BUFFER_IS_DRIVER_ALLOCATED;
	memcpy(ioc->product_specific[buffer_type],
@@ -2469,7 +2475,61 @@ _ctl_diag_read_buffer(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
	return rc;
 }

+/**
+ * _ctl_addnl_diag_query - query relevant info associated with diag buffers
+ * @ioc: per adapter object
+ * @arg: user space buffer containing ioctl content
+ *
+ * The application will send only unique_id.  Driver will
+ * inspect unique_id first, if valid, fill the details related to cause
+ * for diag buffer release.
+ */
+static long
+_ctl_addnl_diag_query(struct MPT3SAS_ADAPTER *ioc, void __user *arg)
+{
+	struct mpt3_addnl_diag_query karg;
+	u32 buffer_type = 0;
+
+	if (copy_from_user(&karg, arg, sizeof(karg))) {
+		pr_err("%s: failure at %s:%d/%s()!\n",
+		    ioc->name, __FILE__, __LINE__, __func__);
+		return -EFAULT;
+	}
+	dctlprintk(ioc, ioc_info(ioc, "%s\n",  __func__));
+	if (karg.unique_id == 0) {
+		ioc_err(ioc, "%s: unique_id is(0x%08x)\n",
+		    __func__, karg.unique_id);
+		return -EPERM;
+	}
+	buffer_type = _ctl_diag_get_bufftype(ioc, karg.unique_id);
+	if (buffer_type == MPT3_DIAG_UID_NOT_FOUND) {
+		ioc_err(ioc, "%s: buffer with unique_id(0x%08x) not found\n",
+		    __func__, karg.unique_id);
+		return -EPERM;
+	}
+	memset(&karg.buffer_rel_condition, 0, sizeof(struct htb_rel_query));
+	if ((ioc->diag_buffer_status[buffer_type] &
+	    MPT3_DIAG_BUFFER_IS_REGISTERED) == 0) {
+		ioc_info(ioc, "%s: buffer_type(0x%02x) is not registered\n",
+		    __func__, buffer_type);
+		goto out;
+	}
+	if ((ioc->diag_buffer_status[buffer_type] &
+	    MPT3_DIAG_BUFFER_IS_RELEASED) == 0) {
+		ioc_err(ioc, "%s: buffer_type(0x%02x) is not released\n",
+		    __func__, buffer_type);
+		return -EPERM;
+	}
+	memcpy(&karg.buffer_rel_condition, &ioc->htb_rel,
+	    sizeof(struct htb_rel_query));
+out:
+	if (copy_to_user(arg, &karg, sizeof(struct mpt3_addnl_diag_query))) {
+		ioc_err(ioc, "%s: unable to write mpt3_addnl_diag_query data @ %p\n",
+		    __func__, arg);
+		return -EFAULT;
+	}
+	return 0;
+}
+
 #ifdef CONFIG_COMPAT
 /**
@@ -2533,7 +2593,7 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
	struct MPT3SAS_ADAPTER *ioc;
	struct mpt3_ioctl_header ioctl_header;
	enum block_state state;
-	long ret = -EINVAL;
+	long ret = -ENOIOCTLCMD;

	/* get IOCTL header */
	if (copy_from_user(&ioctl_header, (char __user *)arg,
@@ -2643,6 +2703,10 @@ _ctl_ioctl_main(struct file *file, unsigned int cmd, void __user *arg,
		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_diag_read_buffer))
			ret = _ctl_diag_read_buffer(ioc, arg);
		break;
+	case MPT3ADDNLDIAGQUERY:
+		if (_IOC_SIZE(cmd) == sizeof(struct mpt3_addnl_diag_query))
+			ret = _ctl_addnl_diag_query(ioc, arg);
+		break;
	default:
		dctlprintk(ioc,
		    ioc_info(ioc, "unsupported ioctl opcode(0x%08x)\n",
@@ -3425,6 +3489,7 @@ host_trace_buffer_enable_store(struct device *cdev,
		    MPT3_DIAG_BUFFER_IS_RELEASED))
			goto out;
		ioc_info(ioc, "releasing host trace buffer\n");
+		ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_SYSFS;
		mpt3sas_send_diag_release(ioc, MPI2_DIAG_BUF_TYPE_TRACE,
		    &issue_reset);
	}
@@ -94,6 +94,8 @@
	struct mpt3_diag_query)
 #define MPT3DIAGREADBUFFER	_IOWR(MPT3_MAGIC_NUMBER, 30, \
	struct mpt3_diag_read_buffer)
+#define MPT3ADDNLDIAGQUERY	_IOWR(MPT3_MAGIC_NUMBER, 32, \
+	struct mpt3_addnl_diag_query)

 /* Trace Buffer default UniqueId */
 #define MPT2DIAGBUFFUNIQUEID (0x07075900)
@@ -430,4 +432,24 @@ struct mpt3_diag_read_buffer {
	uint32_t diagnostic_data[1];
 };

+/**
+ * struct mpt3_addnl_diag_query - diagnostic buffer release reason
+ * @hdr - generic header
+ * @unique_id - unique id associated with this buffer.
+ * @buffer_rel_condition - Release condition ioctl/sysfs/reset
+ * @reserved1
+ * @trigger_type - Master/Event/scsi/MPI
+ * @trigger_info_dwords - Data Correspondig to trigger type
+ * @reserved2
+ */
+struct mpt3_addnl_diag_query {
+	struct mpt3_ioctl_header hdr;
+	uint32_t unique_id;
+	uint16_t buffer_rel_condition;
+	uint16_t reserved1;
+	uint32_t trigger_type;
+	uint32_t trigger_info_dwords[2];
+	uint32_t reserved2[2];
+};
+
 #endif /* MPT3SAS_CTL_H_INCLUDED */
@@ -54,6 +54,7 @@
 #include <linux/interrupt.h>
 #include <linux/aer.h>
 #include <linux/raid_class.h>
+#include <linux/blk-mq-pci.h>
 #include <asm/unaligned.h>

 #include "mpt3sas_base.h"
@@ -168,6 +169,11 @@ MODULE_PARM_DESC(multipath_on_hba,
	"\t SAS 2.0 & SAS 3.0 HBA - This will be disabled,\n\t\t"
	"\t SAS 3.5 HBA - This will be enabled)");

+static int host_tagset_enable = 1;
+module_param(host_tagset_enable, int, 0444);
+MODULE_PARM_DESC(host_tagset_enable,
+	"Shared host tagset enable/disable Default: enable(1)");
+
 /* raid transport support */
 static struct raid_template *mpt3sas_raid_template;
 static struct raid_template *mpt2sas_raid_template;
@@ -1743,10 +1749,12 @@ mpt3sas_scsih_scsi_lookup_get(struct MPT3SAS_ADAPTER *ioc, u16 smid)
	struct scsi_cmnd *scmd = NULL;
	struct scsiio_tracker *st;
	Mpi25SCSIIORequest_t *mpi_request;
+	u16 tag = smid - 1;

	if (smid > 0 &&
	    smid <= ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT) {
-		u32 unique_tag = smid - 1;
+		u32 unique_tag =
+		    ioc->io_queue_num[tag] << BLK_MQ_UNIQUE_TAG_BITS | tag;

		mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);

@@ -11599,6 +11607,22 @@ scsih_scan_finished(struct Scsi_Host *shost, unsigned long time)
	return 1;
 }

+/**
+ * scsih_map_queues - map reply queues with request queues
+ * @shost: SCSI host pointer
+ */
+static int scsih_map_queues(struct Scsi_Host *shost)
+{
+	struct MPT3SAS_ADAPTER *ioc =
+	    (struct MPT3SAS_ADAPTER *)shost->hostdata;
+
+	if (ioc->shost->nr_hw_queues == 1)
+		return 0;
+
+	return blk_mq_pci_map_queues(&shost->tag_set.map[HCTX_TYPE_DEFAULT],
+	    ioc->pdev, ioc->high_iops_queues);
+}
+
 /* shost template for SAS 2.0 HBA devices */
 static struct scsi_host_template mpt2sas_driver_template = {
	.module				= THIS_MODULE,
@@ -11666,6 +11690,7 @@ static struct scsi_host_template mpt3sas_driver_template = {
	.sdev_attrs			= mpt3sas_dev_attrs,
	.track_queue_depth		= 1,
	.cmd_size			= sizeof(struct scsiio_tracker),
+	.map_queues			= scsih_map_queues,
 };

 /* raid transport support for SAS 3.0 HBA devices */
@@ -11922,6 +11947,8 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	 * Enable MEMORY MOVE support flag.
	 */
	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_MEMMOVE;
+	/* Enable ADDITIONAL QUERY support flag. */
+	ioc->drv_support_bitmap |= MPT_DRV_SUPPORT_BITMAP_ADDNLQUERY;

	ioc->enable_sdev_max_qd = enable_sdev_max_qd;

@ -12028,6 +12055,21 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
} else
|
||||
ioc->hide_drives = 0;
|
||||
|
||||
shost->host_tagset = 0;
|
||||
shost->nr_hw_queues = 1;
|
||||
|
||||
if (ioc->is_gen35_ioc && ioc->reply_queue_count > 1 &&
|
||||
host_tagset_enable && ioc->smp_affinity_enable) {
|
||||
|
||||
shost->host_tagset = 1;
|
||||
shost->nr_hw_queues =
|
||||
ioc->reply_queue_count - ioc->high_iops_queues;
|
||||
|
||||
dev_info(&ioc->pdev->dev,
|
||||
"Max SCSIIO MPT commands: %d shared with nr_hw_queues = %d\n",
|
||||
shost->can_queue, shost->nr_hw_queues);
|
||||
}
|
||||
|
||||
rv = scsi_add_host(shost, &pdev->dev);
|
||||
if (rv) {
|
||||
ioc_err(ioc, "failure at %s:%d/%s()!\n",
|
||||
|
|
|
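The unique_tag built in the lookup hunk above deliberately matches the block layer's encoding, where the hardware-queue index occupies the bits above BLK_MQ_UNIQUE_TAG_BITS and the per-queue tag the bits below. A minimal sketch of the round trip using the stock helpers:

#include <linux/blk-mq.h>

/* Sketch: encode and decode a block-layer unique tag for a request.
 * blk_mq_unique_tag() produces hwq << BLK_MQ_UNIQUE_TAG_BITS | tag,
 * the same layout the driver reconstructs from io_queue_num[]. */
static void show_unique_tag(struct request *rq)
{
    u32 unique_tag = blk_mq_unique_tag(rq);
    u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
    u16 tag = blk_mq_unique_tag_to_tag(unique_tag);

    pr_debug("hwq=%u tag=%u\n", hwq, tag);
}
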
@@ -132,6 +132,35 @@ mpt3sas_process_trigger_data(struct MPT3SAS_ADAPTER *ioc,
            &issue_reset);
    }

    ioc->htb_rel.buffer_rel_condition = MPT3_DIAG_BUFFER_REL_TRIGGER;
    if (event_data) {
        ioc->htb_rel.trigger_type = event_data->trigger_type;
        switch (event_data->trigger_type) {
        case MPT3SAS_TRIGGER_SCSI:
            memcpy(&ioc->htb_rel.trigger_info_dwords,
                &event_data->u.scsi,
                sizeof(struct SL_WH_SCSI_TRIGGER_T));
            break;
        case MPT3SAS_TRIGGER_MPI:
            memcpy(&ioc->htb_rel.trigger_info_dwords,
                &event_data->u.mpi,
                sizeof(struct SL_WH_MPI_TRIGGER_T));
            break;
        case MPT3SAS_TRIGGER_MASTER:
            ioc->htb_rel.trigger_info_dwords[0] =
                event_data->u.master.MasterData;
            break;
        case MPT3SAS_TRIGGER_EVENT:
            memcpy(&ioc->htb_rel.trigger_info_dwords,
                &event_data->u.event,
                sizeof(struct SL_WH_EVENT_TRIGGER_T));
            break;
        default:
            ioc_err(ioc, "%d - Is not a valid Trigger type\n",
                event_data->trigger_type);
            break;
        }
    }
    _mpt3sas_raise_sigio(ioc, event_data);

    dTriggerDiagPrintk(ioc, ioc_info(ioc, "%s: exit\n",

@@ -201,9 +230,14 @@ mpt3sas_trigger_master(struct MPT3SAS_ADAPTER *ioc, u32 trigger_bitmask)
    event_data.u.master.MasterData = trigger_bitmask;

    if (trigger_bitmask & MASTER_TRIGGER_FW_FAULT ||
        trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET)
        trigger_bitmask & MASTER_TRIGGER_ADAPTER_RESET) {
        ioc->htb_rel.trigger_type = MPT3SAS_TRIGGER_MASTER;
        ioc->htb_rel.trigger_info_dwords[0] = trigger_bitmask;
        if (ioc->reset_from_user)
            ioc->htb_rel.trigger_info_dwords[1] =
                MPT_DIAG_RESET_ISSUED_BY_USER;
        _mpt3sas_raise_sigio(ioc, &event_data);
    else
    } else
        mpt3sas_send_trigger_data_event(ioc, &event_data);

 out:

@@ -244,7 +244,7 @@ struct pmcraid_ioarcb {
    __u8 hrrq_id;
    __u8 cdb[PMCRAID_MAX_CDB_LEN];
    struct pmcraid_ioarcb_add_data add_data;
} __attribute__((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
};

/* well known resource handle values */
#define PMCRAID_IOA_RES_HANDLE 0xffffffff

@@ -1040,8 +1040,8 @@ struct pmcraid_passthrough_ioctl_buffer {
    struct pmcraid_ioctl_header ioctl_header;
    struct pmcraid_ioarcb ioarcb;
    struct pmcraid_ioasa ioasa;
    u8 request_buffer[1];
} __attribute__ ((packed));
    u8 request_buffer[];
} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));

/*
 * keys to differentiate between driver handled IOCTLs and passthrough

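Converting request_buffer[1] to a flexible array member removes the off-by-one ambiguity in size calculations. Below is a minimal kernel-side sketch of how such a buffer would be sized with struct_size(); the driver's real ioctl path copies the buffer in from user space, so this allocation helper is illustrative only.

#include <linux/overflow.h>
#include <linux/slab.h>

/* Sketch: with a proper flexible array member, struct_size()
 * computes sizeof(struct) + request_size bytes with overflow
 * checking; the old [1] form forced callers to subtract the dummy
 * element by hand. */
static void *alloc_passthrough_buffer(size_t request_size)
{
    struct pmcraid_passthrough_ioctl_buffer *buf;

    buf = kzalloc(struct_size(buf, request_buffer, request_size),
                  GFP_KERNEL);
    return buf;
}
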
@@ -981,8 +981,7 @@ void qlt_free_session_done(struct work_struct *work)
    int rc;

    if (!own ||
        (own &&
        (own->iocb.u.isp24.status_subcode == ELS_PLOGI))) {
        (own->iocb.u.isp24.status_subcode == ELS_PLOGI)) {
        rc = qla2x00_post_async_logout_work(vha, sess,
            NULL);
        if (rc != QLA_SUCCESS)

@@ -841,7 +841,7 @@ static int __qla4xxx_is_chap_active(struct device *dev, void *data)
    sess = cls_session->dd_data;
    ddb_entry = sess->dd_data;

    if (iscsi_session_chkready(cls_session))
    if (iscsi_is_session_online(cls_session))
        goto exit_is_chap_active;

    if (ddb_entry->chap_tbl_idx == *chap_tbl_idx)

@@ -1701,10 +1701,8 @@ static const char *iscsi_session_state_name(int state)

int iscsi_session_chkready(struct iscsi_cls_session *session)
{
    unsigned long flags;
    int err;

    spin_lock_irqsave(&session->lock, flags);
    switch (session->state) {
    case ISCSI_SESSION_LOGGED_IN:
        err = 0;

@@ -1719,7 +1717,6 @@ int iscsi_session_chkready(struct iscsi_cls_session *session)
        err = DID_NO_CONNECT << 16;
        break;
    }
    spin_unlock_irqrestore(&session->lock, flags);
    return err;
}
EXPORT_SYMBOL_GPL(iscsi_session_chkready);

@@ -707,9 +707,9 @@ static int sd_sec_submit(void *data, u16 spsp, u8 secp, void *buffer,
    put_unaligned_be16(spsp, &cdb[2]);
    put_unaligned_be32(len, &cdb[6]);

    ret = scsi_execute_req(sdev, cdb,
        send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
        buffer, len, NULL, SD_TIMEOUT, sdkp->max_retries, NULL);
    ret = scsi_execute(sdev, cdb, send ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
        buffer, len, NULL, NULL, SD_TIMEOUT, sdkp->max_retries, 0,
        RQF_PM, NULL);
    return ret <= 0 ? ret : -EIO;
}
#endif /* CONFIG_BLK_SED_OPAL */

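The Opal fix swaps scsi_execute_req() for scsi_execute() purely to gain the rq_flags argument: with RQF_PM set, the unlock command is dispatched even while the device is quiesced for resume. The same pattern in isolation, as a minimal sketch with illustrative timeout and retry values:

#include <linux/blkdev.h>
#include <scsi/scsi_device.h>

/* Sketch: issue a command that must run while the device is
 * quiesced for power management. RQF_PM marks the request so the
 * block layer dispatches it even though normal I/O is held. */
static int issue_pm_command(struct scsi_device *sdev,
                            const unsigned char *cdb,
                            void *buf, unsigned int len)
{
    return scsi_execute(sdev, cdb, DMA_FROM_DEVICE, buf, len,
                        NULL, NULL, 10 * HZ, 3, 0, RQF_PM, NULL);
}
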
@@ -3379,10 +3379,12 @@ static int sd_probe(struct device *dev)
        sdp->type != TYPE_RBC)
        goto out;

#ifndef CONFIG_BLK_DEV_ZONED
    if (sdp->type == TYPE_ZBC)
    if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) && sdp->type == TYPE_ZBC) {
        sdev_printk(KERN_WARNING, sdp,
            "Unsupported ZBC host-managed device.\n");
        goto out;
#endif
    }

    SCSI_LOG_HLQUEUE(3, sdev_printk(KERN_INFO, sdp,
        "sd_probe\n"));

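Replacing the preprocessor guard with IS_ENABLED() keeps the branch visible to the compiler in every configuration, so both arms are type-checked and the disabled one is simply discarded as dead code. A generic sketch of the idiom, with CONFIG_FOO standing in for any bool/tristate option:

#include <linux/errno.h>
#include <linux/kconfig.h>

/* Sketch: IS_ENABLED(CONFIG_FOO) evaluates to 1 when the option is
 * built in or modular, 0 otherwise. Unlike #ifndef, the disabled
 * branch still compiles, catching bit-rot early. */
static int probe_device_type(int type)
{
    if (!IS_ENABLED(CONFIG_FOO) && type == 0x14 /* e.g. TYPE_ZBC */)
        return -ENODEV; /* zoned support compiled out */
    return 0;
}
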
@@ -704,6 +704,7 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
    unsigned int nr_zones = sdkp->rev_nr_zones;
    u32 max_append;
    int ret = 0;
    unsigned int flags;

    /*
     * For all zoned disks, initialize zone append emulation data if not

@@ -736,16 +737,19 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp)
        disk->queue->nr_zones == nr_zones)
        goto unlock;

    flags = memalloc_noio_save();
    sdkp->zone_blocks = zone_blocks;
    sdkp->nr_zones = nr_zones;
    sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_NOIO);
    sdkp->rev_wp_offset = kvcalloc(nr_zones, sizeof(u32), GFP_KERNEL);
    if (!sdkp->rev_wp_offset) {
        ret = -ENOMEM;
        memalloc_noio_restore(flags);
        goto unlock;
    }

    ret = blk_revalidate_disk_zones(disk, sd_zbc_revalidate_zones_cb);

    memalloc_noio_restore(flags);
    kvfree(sdkp->rev_wp_offset);
    sdkp->rev_wp_offset = NULL;

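kvcalloc() may fall back to vmalloc(), which only supports GFP_KERNEL-style contexts, so GFP_NOIO cannot be passed directly; the fix above wraps the whole revalidation in a NOIO scope instead. The general pattern, as a minimal sketch:

#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Sketch: forbid I/O-triggering reclaim for a whole region instead
 * of tagging each allocation. Within the scope, GFP_KERNEL
 * allocations behave as GFP_NOIO, and vmalloc-backed helpers such
 * as kvcalloc() remain usable. */
static void *alloc_in_noio_scope(size_t n, size_t size)
{
    unsigned int flags = memalloc_noio_save();
    void *p = kvcalloc(n, size, GFP_KERNEL); /* implicitly NOIO here */

    memalloc_noio_restore(flags);
    return p;
}
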
@@ -451,6 +451,8 @@ static void ufshcd_print_evt(struct ufs_hba *hba, u32 id,

    if (!found)
        dev_err(hba->dev, "No record of %s\n", err_name);
    else
        dev_err(hba->dev, "%s: total cnt=%llu\n", err_name, e->cnt);
}

static void ufshcd_print_evt_hist(struct ufs_hba *hba)

@@ -1866,7 +1868,7 @@ static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
{
    struct ufs_hba *hba = dev_get_drvdata(dev);

    return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
    return sysfs_emit(buf, "%lu\n", hba->clk_gating.delay_ms);
}

static ssize_t ufshcd_clkgate_delay_store(struct device *dev,

@@ -1889,7 +1891,7 @@ static ssize_t ufshcd_clkgate_enable_show(struct device *dev,
{
    struct ufs_hba *hba = dev_get_drvdata(dev);

    return snprintf(buf, PAGE_SIZE, "%d\n", hba->clk_gating.is_enabled);
    return sysfs_emit(buf, "%d\n", hba->clk_gating.is_enabled);
}

static ssize_t ufshcd_clkgate_enable_store(struct device *dev,

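sysfs_emit() is the preferred replacement for snprintf() in sysfs show() callbacks: it checks that the buffer really is a page-sized, page-aligned sysfs buffer and returns the number of bytes written. A minimal show() sketch for a hypothetical attribute (my_dev and delay_ms are illustrative names only):

#include <linux/device.h>
#include <linux/sysfs.h>

struct my_dev {
    unsigned long delay_ms;
};

/* Sketch: a hypothetical read-only device attribute using
 * sysfs_emit() instead of snprintf(buf, PAGE_SIZE, ...). */
static ssize_t delay_ms_show(struct device *dev,
                             struct device_attribute *attr, char *buf)
{
    struct my_dev *md = dev_get_drvdata(dev);

    return sysfs_emit(buf, "%lu\n", md->delay_ms);
}
static DEVICE_ATTR_RO(delay_ms);
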
@@ -570,7 +570,7 @@ enum ufshcd_quirks {
    /*
     * This quirk allows only sg entries aligned with page size.
     */
    UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 13,
    UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE = 1 << 14,
};

enum ufshcd_caps {

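The quirk moves from bit 13 to bit 14 because another quirk already claimed 1 << 13, and overlapping flag bits make setting one quirk silently imply the other. One way to catch such collisions at compile time, sketched with hypothetical flag names:

#include <linux/build_bug.h>

/* Sketch: hypothetical quirk flags; BUILD_BUG_ON fires at compile
 * time if the two flags ever share a bit. */
#define QUIRK_A (1 << 13)
#define QUIRK_B (1 << 14)

static inline void quirk_sanity_check(void)
{
    BUILD_BUG_ON(QUIRK_A & QUIRK_B);
}
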
@@ -1006,7 +1006,7 @@ static void tgt_agent_fetch_work(struct work_struct *work)
            agent->state = AGENT_STATE_SUSPENDED;

        spin_unlock_bh(&agent->lock);
    };
    }
}

static struct sbp_target_agent *sbp_target_agent_register(

@@ -3739,6 +3739,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
    spin_unlock(&dev->t10_pr.registration_lock);

    put_unaligned_be32(add_len, &buf[4]);
    target_set_cmd_data_length(cmd, 8 + add_len);

    transport_kunmap_data_sg(cmd);

@@ -3757,7 +3758,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
    struct t10_pr_registration *pr_reg;
    unsigned char *buf;
    u64 pr_res_key;
    u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
    u32 add_len = 0;

    if (cmd->data_length < 8) {
        pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"

@@ -3775,8 +3776,9 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
    pr_reg = dev->dev_pr_res_holder;
    if (pr_reg) {
        /*
         * Set the hardcoded Additional Length
         * Set the Additional Length to 16 when a reservation is held
         */
        add_len = 16;
        put_unaligned_be32(add_len, &buf[4]);

        if (cmd->data_length < 22)

@@ -3812,6 +3814,8 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd)
            (pr_reg->pr_res_type & 0x0f);
    }

    target_set_cmd_data_length(cmd, 8 + add_len);

err:
    spin_unlock(&dev->dev_reservation_lock);
    transport_kunmap_data_sg(cmd);

@@ -3830,7 +3834,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
    struct se_device *dev = cmd->se_dev;
    struct t10_reservation *pr_tmpl = &dev->t10_pr;
    unsigned char *buf;
    u16 add_len = 8; /* Hardcoded to 8. */
    u16 len = 8; /* Hardcoded to 8. */

    if (cmd->data_length < 6) {
        pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"

@@ -3842,7 +3846,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
    if (!buf)
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

    put_unaligned_be16(add_len, &buf[0]);
    put_unaligned_be16(len, &buf[0]);
    buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
    buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
    buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */

@@ -3871,6 +3875,8 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
    buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
    buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */

    target_set_cmd_data_length(cmd, len);

    transport_kunmap_data_sg(cmd);

    return 0;

@@ -4031,6 +4037,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
     * Set ADDITIONAL_LENGTH
     */
    put_unaligned_be32(add_len, &buf[4]);
    target_set_cmd_data_length(cmd, 8 + add_len);

    transport_kunmap_data_sg(cmd);

@@ -879,11 +879,9 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
}
EXPORT_SYMBOL(target_complete_cmd);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
void target_set_cmd_data_length(struct se_cmd *cmd, int length)
{
    if ((scsi_status == SAM_STAT_GOOD ||
        cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
        length < cmd->data_length) {
    if (length < cmd->data_length) {
        if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
            cmd->residual_count += cmd->data_length - length;
        } else {

@@ -893,6 +891,15 @@ void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int len

        cmd->data_length = length;
    }
}
EXPORT_SYMBOL(target_set_cmd_data_length);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
    if (scsi_status == SAM_STAT_GOOD ||
        cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
        target_set_cmd_data_length(cmd, length);
    }

    target_complete_cmd(cmd, scsi_status);
}

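Splitting the length adjustment out of target_complete_cmd_with_length() lets emulation code shrink the data length even when it completes with a non-GOOD status, which is what the persistent-reservation hunks above rely on to keep the residual count correct. A sketch of the two call shapes a backend now has; cmd, ok and resp_len are illustrative:

#include <scsi/scsi_proto.h>
#include <target/target_core_backend.h>

/* Sketch: complete an emulated command, trimming the data length
 * whether or not the command succeeded. */
static void finish_emulated_cmd(struct se_cmd *cmd, bool ok, int resp_len)
{
    if (ok) {
        target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, resp_len);
        return;
    }
    /* on error, still set the length so the residual is right */
    target_set_cmd_data_length(cmd, resp_len);
    target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
}
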
@@ -1566,6 +1566,88 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
    return &udev->se_dev;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
    struct se_device *dev = container_of(p, struct se_device, rcu_head);
    struct tcmu_dev *udev = TCMU_DEV(dev);

    kfree(udev->uio_info.name);
    kfree(udev->name);
    kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
    if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
        kmem_cache_free(tcmu_cmd_cache, cmd);
        return 0;
    }
    return -EINVAL;
}

static void tcmu_blocks_release(struct radix_tree_root *blocks,
                int start, int end)
{
    int i;
    struct page *page;

    for (i = start; i < end; i++) {
        page = radix_tree_delete(blocks, i);
        if (page) {
            __free_page(page);
            atomic_dec(&global_db_count);
        }
    }
}

static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
{
    struct tcmu_tmr *tmr, *tmp;

    list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
        list_del_init(&tmr->queue_entry);
        kfree(tmr);
    }
}

static void tcmu_dev_kref_release(struct kref *kref)
{
    struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
    struct se_device *dev = &udev->se_dev;
    struct tcmu_cmd *cmd;
    bool all_expired = true;
    int i;

    vfree(udev->mb_addr);
    udev->mb_addr = NULL;

    spin_lock_bh(&timed_out_udevs_lock);
    if (!list_empty(&udev->timedout_entry))
        list_del(&udev->timedout_entry);
    spin_unlock_bh(&timed_out_udevs_lock);

    /* Upper layer should drain all requests before calling this */
    mutex_lock(&udev->cmdr_lock);
    idr_for_each_entry(&udev->commands, cmd, i) {
        if (tcmu_check_and_free_pending_cmd(cmd) != 0)
            all_expired = false;
    }
    /* There can be left over TMR cmds. Remove them. */
    tcmu_remove_all_queued_tmr(udev);
    if (!list_empty(&udev->qfull_queue))
        all_expired = false;
    idr_destroy(&udev->commands);
    WARN_ON(!all_expired);

    tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
    bitmap_free(udev->data_bitmap);
    mutex_unlock(&udev->cmdr_lock);

    pr_debug("dev_kref_release\n");

    call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

static void run_qfull_queue(struct tcmu_dev *udev, bool fail)
{
    struct tcmu_cmd *tcmu_cmd, *tmp_cmd;

@@ -1678,6 +1760,25 @@ static struct page *tcmu_try_get_block_page(struct tcmu_dev *udev, uint32_t dbi)
    return page;
}

static void tcmu_vma_open(struct vm_area_struct *vma)
{
    struct tcmu_dev *udev = vma->vm_private_data;

    pr_debug("vma_open\n");

    kref_get(&udev->kref);
}

static void tcmu_vma_close(struct vm_area_struct *vma)
{
    struct tcmu_dev *udev = vma->vm_private_data;

    pr_debug("vma_close\n");

    /* release ref from tcmu_vma_open */
    kref_put(&udev->kref, tcmu_dev_kref_release);
}

static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
{
    struct tcmu_dev *udev = vmf->vma->vm_private_data;

@@ -1716,6 +1817,8 @@ static vm_fault_t tcmu_vma_fault(struct vm_fault *vmf)
}

static const struct vm_operations_struct tcmu_vm_ops = {
    .open = tcmu_vma_open,
    .close = tcmu_vma_close,
    .fault = tcmu_vma_fault,
};

@@ -1732,6 +1835,8 @@ static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
    if (vma_pages(vma) != (udev->ring_size >> PAGE_SHIFT))
        return -EINVAL;

    tcmu_vma_open(vma);

    return 0;
}

@@ -1744,93 +1849,12 @@ static int tcmu_open(struct uio_info *info, struct inode *inode)
        return -EBUSY;

    udev->inode = inode;
    kref_get(&udev->kref);

    pr_debug("open\n");

    return 0;
}

static void tcmu_dev_call_rcu(struct rcu_head *p)
{
    struct se_device *dev = container_of(p, struct se_device, rcu_head);
    struct tcmu_dev *udev = TCMU_DEV(dev);

    kfree(udev->uio_info.name);
    kfree(udev->name);
    kfree(udev);
}

static int tcmu_check_and_free_pending_cmd(struct tcmu_cmd *cmd)
{
    if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
        kmem_cache_free(tcmu_cmd_cache, cmd);
        return 0;
    }
    return -EINVAL;
}

static void tcmu_blocks_release(struct radix_tree_root *blocks,
                int start, int end)
{
    int i;
    struct page *page;

    for (i = start; i < end; i++) {
        page = radix_tree_delete(blocks, i);
        if (page) {
            __free_page(page);
            atomic_dec(&global_db_count);
        }
    }
}

static void tcmu_remove_all_queued_tmr(struct tcmu_dev *udev)
{
    struct tcmu_tmr *tmr, *tmp;

    list_for_each_entry_safe(tmr, tmp, &udev->tmr_queue, queue_entry) {
        list_del_init(&tmr->queue_entry);
        kfree(tmr);
    }
}

static void tcmu_dev_kref_release(struct kref *kref)
{
    struct tcmu_dev *udev = container_of(kref, struct tcmu_dev, kref);
    struct se_device *dev = &udev->se_dev;
    struct tcmu_cmd *cmd;
    bool all_expired = true;
    int i;

    vfree(udev->mb_addr);
    udev->mb_addr = NULL;

    spin_lock_bh(&timed_out_udevs_lock);
    if (!list_empty(&udev->timedout_entry))
        list_del(&udev->timedout_entry);
    spin_unlock_bh(&timed_out_udevs_lock);

    /* Upper layer should drain all requests before calling this */
    mutex_lock(&udev->cmdr_lock);
    idr_for_each_entry(&udev->commands, cmd, i) {
        if (tcmu_check_and_free_pending_cmd(cmd) != 0)
            all_expired = false;
    }
    /* There can be left over TMR cmds. Remove them. */
    tcmu_remove_all_queued_tmr(udev);
    if (!list_empty(&udev->qfull_queue))
        all_expired = false;
    idr_destroy(&udev->commands);
    WARN_ON(!all_expired);

    tcmu_blocks_release(&udev->data_blocks, 0, udev->dbi_max + 1);
    bitmap_free(udev->data_bitmap);
    mutex_unlock(&udev->cmdr_lock);

    call_rcu(&dev->rcu_head, tcmu_dev_call_rcu);
}

static int tcmu_release(struct uio_info *info, struct inode *inode)
{
    struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);

@@ -1838,8 +1862,7 @@ static int tcmu_release(struct uio_info *info, struct inode *inode)
    clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);

    pr_debug("close\n");
    /* release ref from open */
    kref_put(&udev->kref, tcmu_dev_kref_release);

    return 0;
}

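The tcmu change ties device lifetime to every mapping: tcmu_mmap() takes a reference via tcmu_vma_open(), forked or split VMAs take their own through .open, and .close drops it, so the uio ring can never be freed while user space still has it mapped. The same pattern for a generic driver, as a minimal sketch with hypothetical my_dev naming:

#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/slab.h>

struct my_dev {
    struct kref kref;
    /* ... mapped resources ... */
};

static void my_dev_release(struct kref *kref)
{
    kfree(container_of(kref, struct my_dev, kref));
}

/* Each VMA referencing the device pins it; .open runs on fork/split. */
static void my_vma_open(struct vm_area_struct *vma)
{
    struct my_dev *md = vma->vm_private_data;

    kref_get(&md->kref);
}

static void my_vma_close(struct vm_area_struct *vma)
{
    struct my_dev *md = vma->vm_private_data;

    kref_put(&md->kref, my_dev_release);
}

static const struct vm_operations_struct my_vm_ops = {
    .open = my_vma_open,
    .close = my_vma_close,
};

static int my_mmap(struct my_dev *md, struct vm_area_struct *vma)
{
    vma->vm_private_data = md;
    vma->vm_ops = &my_vm_ops;
    my_vma_open(vma);   /* mmap() itself does not invoke .open */
    return 0;
}
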
@@ -187,7 +187,7 @@ struct iscsi_conn {
    struct iscsi_task *task;    /* xmit task in progress */

    /* xmit */
    spinlock_t taskqueuelock;   /* protects the next three lists */
    /* items must be added/deleted under frwd lock */
    struct list_head mgmtqueue; /* mgmt (control) xmit queue */
    struct list_head cmdqueue;  /* data-path cmd queue */
    struct list_head requeue;   /* tasks needing another run */

@@ -332,7 +332,7 @@ struct iscsi_session {
                 * cmdsn, queued_cmdsn     *
                 * session resources:     *
                 * - cmdpool kfifo_out ,  *
                 * - mgmtpool, */
                 * - mgmtpool, queues */
    spinlock_t back_lock;   /* protects cmdsn_exp      *
                 * cmdsn_max,              *
                 * cmdpool kfifo_in */

@@ -395,6 +395,8 @@ extern struct Scsi_Host *iscsi_host_alloc(struct scsi_host_template *sht,
extern void iscsi_host_remove(struct Scsi_Host *shost);
extern void iscsi_host_free(struct Scsi_Host *shost);
extern int iscsi_target_alloc(struct scsi_target *starget);
extern int iscsi_host_get_max_scsi_cmds(struct Scsi_Host *shost,
                    uint16_t requested_cmds_max);

/*
 * session management

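iscsi_host_get_max_scsi_cmds() backs the iscsi_tcp can_queue fix in this series: a driver asks libiscsi how many SCSI commands a session with a given cmds_max can actually carry, with libiscsi reserving its internal slots, and sizes the host accordingly. A hypothetical caller, sketched; the assumption that a negative return signals an error is not confirmed by this hunk:

#include <scsi/libiscsi.h>

/* Sketch: size shost->can_queue from the session's cmds_max. */
static int size_host_queue(struct Scsi_Host *shost, uint16_t cmds_max)
{
    int max_cmds = iscsi_host_get_max_scsi_cmds(shost, cmds_max);

    if (max_cmds < 0)
        return max_cmds;
    shost->can_queue = max_cmds;
    return 0;
}
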
@@ -72,6 +72,7 @@ int transport_backend_register(const struct target_backend_ops *);
void target_backend_unregister(const struct target_backend_ops *);

void target_complete_cmd(struct se_cmd *, u8);
void target_set_cmd_data_length(struct se_cmd *, int);
void target_complete_cmd_with_length(struct se_cmd *, u8, int);

void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *);