Merge remote-tracking branch 'mkp-scsi/fixes' into fixes
commit e2a3a67302
@@ -1241,19 +1241,32 @@ config SCSI_LPFC
 	tristate "Emulex LightPulse Fibre Channel Support"
 	depends on PCI && SCSI
 	depends on SCSI_FC_ATTRS
-	depends on NVME_FC && NVME_TARGET_FC
 	select CRC_T10DIF
-	help
+	---help---
 	  This lpfc driver supports the Emulex LightPulse
 	  Family of Fibre Channel PCI host adapters.
 
 config SCSI_LPFC_DEBUG_FS
 	bool "Emulex LightPulse Fibre Channel debugfs Support"
 	depends on SCSI_LPFC && DEBUG_FS
-	help
+	---help---
 	  This makes debugging information from the lpfc driver
 	  available via the debugfs filesystem.
 
+config LPFC_NVME_INITIATOR
+	bool "Emulex LightPulse Fibre Channel NVME Initiator Support"
+	depends on SCSI_LPFC && NVME_FC
+	---help---
+	  This enables NVME Initiator support in the Emulex lpfc driver.
+
+config LPFC_NVME_TARGET
+	bool "Emulex LightPulse Fibre Channel NVME Target Support"
+	depends on SCSI_LPFC && NVME_TARGET_FC
+	---help---
+	  This enables NVME Target support in the Emulex lpfc driver.
+	  Target enablement must still be enabled on a per adapter
+	  basis by module parameters.
+
 config SCSI_SIM710
 	tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
 	depends on (EISA || MCA) && SCSI
@@ -468,7 +468,7 @@ static int aac_src_check_health(struct aac_dev *dev)
 		return -1;
 
 err_blink:
-	return (status > 16) & 0xFF;
+	return (status >> 16) & 0xFF;
 }
 
 static inline u32 aac_get_vector(struct aac_dev *dev)
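The aacraid hunk is a single-character fix worth spelling out: "status > 16" is a relational comparison yielding 0 or 1, so the old code could only ever return 0 or 1, while "status >> 16" shifts the firmware status word right so the intended byte can be masked off. A standalone sketch of the difference (the status value is invented for illustration):

	#include <stdio.h>

	int main(void)
	{
		unsigned int status = 0xef0000;	/* blink code assumed in bits 23:16 */
		unsigned int buggy = (status > 16) & 0xFF;	/* (1) & 0xFF == 1 */
		unsigned int fixed = (status >> 16) & 0xFF;	/* 0xef, as intended */

		printf("buggy=%u fixed=0x%x\n", buggy, fixed);
		return 0;
	}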
@@ -561,8 +561,12 @@ static void iscsi_complete_task(struct iscsi_task *task, int state)
 	WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
 	task->state = state;
 
-	if (!list_empty(&task->running))
+	spin_lock_bh(&conn->taskqueuelock);
+	if (!list_empty(&task->running)) {
+		pr_debug_once("%s while task on list", __func__);
 		list_del_init(&task->running);
+	}
+	spin_unlock_bh(&conn->taskqueuelock);
 
 	if (conn->task == task)
 		conn->task = NULL;
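Every libiscsi hunk in this series applies the same recipe: the new conn->taskqueuelock must be held around any test or update of a task's ->running list membership, so the xmit worker and the completion path can no longer race between list_empty() and list_del_init(). The core idiom, reduced to a sketch (field names as in the patch, surrounding code elided):

	/* Sketch: unlinking a task that may still sit on an xmit queue.
	 * Assumes conn->taskqueuelock guards mgmtqueue, cmdqueue, requeue
	 * and the task->running list_heads threaded onto them.
	 */
	spin_lock_bh(&conn->taskqueuelock);
	if (!list_empty(&task->running)) {
		pr_debug_once("%s while task on list", __func__);
		list_del_init(&task->running);	/* safe: no one else can touch it now */
	}
	spin_unlock_bh(&conn->taskqueuelock);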
@@ -784,7 +788,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 		if (session->tt->xmit_task(task))
 			goto free_task;
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->mgmtqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -1475,8 +1481,10 @@ void iscsi_requeue_task(struct iscsi_task *task)
 	 * this may be on the requeue list already if the xmit_task callout
 	 * is handling the r2ts while we are adding new ones
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 	if (list_empty(&task->running))
 		list_add_tail(&task->running, &conn->requeue);
+	spin_unlock_bh(&conn->taskqueuelock);
 	iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1513,22 +1521,26 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 	 * only have one nop-out as a ping from us and targets should not
 	 * overflow us with nop-ins
 	 */
+	spin_lock_bh(&conn->taskqueuelock);
 check_mgmt:
 	while (!list_empty(&conn->mgmtqueue)) {
 		conn->task = list_entry(conn->mgmtqueue.next,
 					 struct iscsi_task, running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (iscsi_prep_mgmt_task(conn, conn->task)) {
 			/* regular RX path uses back_lock */
 			spin_lock_bh(&conn->session->back_lock);
 			__iscsi_put_task(conn->task);
 			spin_unlock_bh(&conn->session->back_lock);
 			conn->task = NULL;
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 	}
 
 	/* process pending command queue */
@@ -1536,19 +1548,24 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 		conn->task = list_entry(conn->cmdqueue.next, struct iscsi_task,
 					running);
 		list_del_init(&conn->task->running);
+		spin_unlock_bh(&conn->taskqueuelock);
 		if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
 			fail_scsi_task(conn->task, DID_IMM_RETRY);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_prep_scsi_cmd_pdu(conn->task);
 		if (rc) {
 			if (rc == -ENOMEM || rc == -EACCES) {
+				spin_lock_bh(&conn->taskqueuelock);
 				list_add_tail(&conn->task->running,
 					      &conn->cmdqueue);
 				conn->task = NULL;
+				spin_unlock_bh(&conn->taskqueuelock);
 				goto done;
 			} else
 				fail_scsi_task(conn->task, DID_ABORT);
+			spin_lock_bh(&conn->taskqueuelock);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
@@ -1559,6 +1576,7 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 		 * we need to check the mgmt queue for nops that need to
 		 * be sent to aviod starvation
 		 */
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
@@ -1578,12 +1596,15 @@ static int iscsi_data_xmit(struct iscsi_conn *conn)
 		conn->task = task;
 		list_del_init(&conn->task->running);
 		conn->task->state = ISCSI_TASK_RUNNING;
+		spin_unlock_bh(&conn->taskqueuelock);
 		rc = iscsi_xmit_task(conn);
 		if (rc)
 			goto done;
+		spin_lock_bh(&conn->taskqueuelock);
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
+	spin_unlock_bh(&conn->taskqueuelock);
 	spin_unlock_bh(&conn->session->frwd_lock);
 	return -ENODATA;
 
@@ -1739,7 +1760,9 @@ int iscsi_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *sc)
 			goto prepd_reject;
 		}
 	} else {
+		spin_lock_bh(&conn->taskqueuelock);
 		list_add_tail(&task->running, &conn->cmdqueue);
+		spin_unlock_bh(&conn->taskqueuelock);
 		iscsi_conn_queue_work(conn);
 	}
 
@@ -2897,6 +2920,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
 	INIT_LIST_HEAD(&conn->mgmtqueue);
 	INIT_LIST_HEAD(&conn->cmdqueue);
 	INIT_LIST_HEAD(&conn->requeue);
+	spin_lock_init(&conn->taskqueuelock);
 	INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
 	/* allocate login_task used for the login/text sequences */
@@ -99,12 +99,13 @@ struct lpfc_sli2_slim;
 #define FC_MAX_ADPTMSG		64
 
 #define MAX_HBAEVT	32
+#define MAX_HBAS_NO_RESET 16
 
 /* Number of MSI-X vectors the driver uses */
 #define LPFC_MSIX_VECTORS	2
 
 /* lpfc wait event data ready flag */
-#define LPFC_DATA_READY		(1<<0)
+#define LPFC_DATA_READY		0	/* bit 0 */
 
 /* queue dump line buffer size */
 #define LPFC_LBUF_SZ		128
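The LPFC_DATA_READY change is subtler than it looks: the macro is consumed by the atomic bitops (set_bit(), test_bit(), clear_bit()), which take a bit *number*, not a bit *mask*. With the old value (1<<0) == 1, the bitops were really operating on bit 1. A sketch of the distinction (plain C, values only for illustration):

	#define DATA_READY_MASK	(1 << 0)	/* old style: a mask */
	#define DATA_READY_BIT	0		/* new style: a bit number */

	unsigned long flags = 0;
	flags |= 1UL << DATA_READY_MASK;	/* what set_bit() did: bit 1, 0x2 */
	flags |= 1UL << DATA_READY_BIT;		/* what was intended: bit 0, 0x1 */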
@@ -692,6 +693,7 @@ struct lpfc_hba {
 					 * capability
 					 */
 #define HBA_NVME_IOQ_FLUSH      0x80000 /* NVME IO queues flushed. */
+#define NVME_XRI_ABORT_EVENT	0x100000
 
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
@@ -3010,6 +3010,12 @@ MODULE_PARM_DESC(lpfc_poll, "FCP ring polling mode control:"
 static DEVICE_ATTR(lpfc_poll, S_IRUGO | S_IWUSR,
 		   lpfc_poll_show, lpfc_poll_store);
 
+int lpfc_no_hba_reset_cnt;
+unsigned long lpfc_no_hba_reset[MAX_HBAS_NO_RESET] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+module_param_array(lpfc_no_hba_reset, ulong, &lpfc_no_hba_reset_cnt, 0444);
+MODULE_PARM_DESC(lpfc_no_hba_reset, "WWPN of HBAs that should not be reset");
+
 LPFC_ATTR(sli_mode, 0, 0, 3,
 	"SLI mode selector:"
 	" 0 - auto (SLI-3 if supported),"
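module_param_array() is what makes the new lpfc_no_hba_reset option work: it parses a comma-separated list from the module command line into the C array and records how many entries were supplied in lpfc_no_hba_reset_cnt, so the driver only ever scans valid slots. Usage would look roughly like this (the WWPN values are invented for the example):

	# modprobe lpfc lpfc_no_hba_reset=0x10000090fa916885,0x10000090fa916886

and the corresponding in-driver lookup is just a bounded scan:

	for (i = 0; i < lpfc_no_hba_reset_cnt; i++)
		if (wwn == lpfc_no_hba_reset[i])
			break;	/* this HBA must not be reset */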
@@ -4451,7 +4457,8 @@ lpfc_fcp_imax_store(struct device *dev, struct device_attribute *attr,
 		return -EINVAL;
 
 	phba->cfg_fcp_imax = (uint32_t)val;
-	for (i = 0; i < phba->io_channel_irqs; i++)
+
+	for (i = 0; i < phba->io_channel_irqs; i += LPFC_MAX_EQ_DELAY_EQID_CNT)
 		lpfc_modify_hba_eq_delay(phba, i);
 
 	return strlen(buf);
@@ -384,7 +384,7 @@ void lpfc_free_sysfs_attr(struct lpfc_vport *);
 extern struct device_attribute *lpfc_hba_attrs[];
 extern struct device_attribute *lpfc_vport_attrs[];
 extern struct scsi_host_template lpfc_template;
-extern struct scsi_host_template lpfc_template_s3;
+extern struct scsi_host_template lpfc_template_no_hr;
 extern struct scsi_host_template lpfc_template_nvme;
 extern struct scsi_host_template lpfc_vport_template;
 extern struct fc_function_template lpfc_transport_functions;
@@ -554,3 +554,5 @@ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba,
 				struct lpfc_wcqe_complete *abts_cmpl);
 extern int lpfc_enable_nvmet_cnt;
 extern unsigned long long lpfc_enable_nvmet[];
+extern int lpfc_no_hba_reset_cnt;
+extern unsigned long lpfc_no_hba_reset[];
@@ -939,8 +939,8 @@ lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
 				 "FC4 x%08x, Data: x%08x x%08x\n",
 				 ndlp, did, ndlp->nlp_fc4_type,
 				 FC_TYPE_FCP, FC_TYPE_NVME);
+		ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
 	}
-	ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
 	lpfc_nlp_set_state(vport, ndlp, NLP_STE_PRLI_ISSUE);
 	lpfc_issue_els_prli(vport, ndlp, 0);
 } else
@@ -3653,17 +3653,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			idiag.ptr_private = phba->sli4_hba.nvmels_cq;
 			goto pass_check;
 		}
-		/* NVME LS complete queue */
-		if (phba->sli4_hba.nvmels_cq &&
-		    phba->sli4_hba.nvmels_cq->queue_id == queid) {
-			/* Sanity check */
-			rc = lpfc_idiag_que_param_check(
-					phba->sli4_hba.nvmels_cq, index, count);
-			if (rc)
-				goto error_out;
-			idiag.ptr_private = phba->sli4_hba.nvmels_cq;
-			goto pass_check;
-		}
 		/* FCP complete queue */
 		if (phba->sli4_hba.fcp_cq) {
 			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -3738,17 +3727,6 @@ lpfc_idiag_queacc_write(struct file *file, const char __user *buf,
 			idiag.ptr_private = phba->sli4_hba.nvmels_wq;
 			goto pass_check;
 		}
-		/* NVME LS work queue */
-		if (phba->sli4_hba.nvmels_wq &&
-		    phba->sli4_hba.nvmels_wq->queue_id == queid) {
-			/* Sanity check */
-			rc = lpfc_idiag_que_param_check(
-					phba->sli4_hba.nvmels_wq, index, count);
-			if (rc)
-				goto error_out;
-			idiag.ptr_private = phba->sli4_hba.nvmels_wq;
-			goto pass_check;
-		}
 		/* FCP work queue */
 		if (phba->sli4_hba.fcp_wq) {
 			for (qidx = 0; qidx < phba->cfg_fcp_io_channel;
@@ -5177,15 +5177,15 @@ lpfc_rdp_res_speed(struct fc_rdp_port_speed_desc *desc, struct lpfc_hba *phba)
 
 static uint32_t
 lpfc_rdp_res_diag_port_names(struct fc_rdp_port_name_desc *desc,
-		struct lpfc_hba *phba)
+		struct lpfc_vport *vport)
 {
 
 	desc->tag = cpu_to_be32(RDP_PORT_NAMES_DESC_TAG);
 
-	memcpy(desc->port_names.wwnn, phba->wwnn,
+	memcpy(desc->port_names.wwnn, &vport->fc_nodename,
 	       sizeof(desc->port_names.wwnn));
 
-	memcpy(desc->port_names.wwpn, phba->wwpn,
+	memcpy(desc->port_names.wwpn, &vport->fc_portname,
 	       sizeof(desc->port_names.wwpn));
 
 	desc->length = cpu_to_be32(sizeof(desc->port_names));
@@ -5279,7 +5279,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context,
 	len += lpfc_rdp_res_link_error((struct fc_rdp_link_error_status_desc *)
 				       (len + pcmd), &rdp_context->link_stat);
 	len += lpfc_rdp_res_diag_port_names((struct fc_rdp_port_name_desc *)
-					     (len + pcmd), phba);
+					     (len + pcmd), vport);
 	len += lpfc_rdp_res_attach_port_names((struct fc_rdp_port_name_desc *)
 					       (len + pcmd), vport, ndlp);
 	len += lpfc_rdp_res_fec_desc((struct fc_fec_rdp_desc *)(len + pcmd),
@@ -8371,11 +8371,17 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 			spin_lock_irq(shost->host_lock);
 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
 			spin_unlock_irq(shost->host_lock);
-			if (vport->port_type == LPFC_PHYSICAL_PORT
-				&& !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG))
-				lpfc_issue_init_vfi(vport);
-			else
+			if (mb->mbxStatus == MBX_NOT_FINISHED)
+				break;
+			if ((vport->port_type == LPFC_PHYSICAL_PORT) &&
+			    !(vport->fc_flag & FC_LOGO_RCVD_DID_CHNG)) {
+				if (phba->sli_rev == LPFC_SLI_REV4)
+					lpfc_issue_init_vfi(vport);
+				else
+					lpfc_initial_flogi(vport);
+			} else {
 				lpfc_initial_fdisc(vport);
+			}
 			break;
 		}
 	} else {
@@ -313,8 +313,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 				 ndlp->nlp_state, ndlp->nlp_rpi);
 	}
 
-	if (!(vport->load_flag & FC_UNLOADING) &&
-	    !(ndlp->nlp_flag & NLP_DELAY_TMO) &&
+	if (!(ndlp->nlp_flag & NLP_DELAY_TMO) &&
 	    !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
 	    (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
 	    (ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) &&
@@ -641,6 +640,8 @@ lpfc_work_done(struct lpfc_hba *phba)
 			lpfc_handle_rrq_active(phba);
 		if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
 			lpfc_sli4_fcp_xri_abort_event_proc(phba);
+		if (phba->hba_flag & NVME_XRI_ABORT_EVENT)
+			lpfc_sli4_nvme_xri_abort_event_proc(phba);
 		if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
 			lpfc_sli4_els_xri_abort_event_proc(phba);
 		if (phba->hba_flag & ASYNC_EVENT)
@@ -2173,7 +2174,7 @@ lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 	uint32_t boot_flag, addr_mode;
 	uint16_t fcf_index, next_fcf_index;
 	struct lpfc_fcf_rec *fcf_rec = NULL;
-	uint16_t vlan_id;
+	uint16_t vlan_id = LPFC_FCOE_NULL_VID;
 	bool select_new_fcf;
 	int rc;
 
@@ -4020,9 +4021,11 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 		rdata = rport->dd_data;
 		/* break the link before dropping the ref */
 		ndlp->rport = NULL;
-		if (rdata && rdata->pnode == ndlp)
-			lpfc_nlp_put(ndlp);
-		rdata->pnode = NULL;
+		if (rdata) {
+			if (rdata->pnode == ndlp)
+				lpfc_nlp_put(ndlp);
+			rdata->pnode = NULL;
+		}
 		/* drop reference for earlier registeration */
 		put_device(&rport->dev);
 	}
@@ -4344,9 +4347,8 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
 {
 	INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp);
 	INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp);
-	init_timer(&ndlp->nlp_delayfunc);
-	ndlp->nlp_delayfunc.function = lpfc_els_retry_delay;
-	ndlp->nlp_delayfunc.data = (unsigned long)ndlp;
+	setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay,
+			(unsigned long)ndlp);
 	ndlp->nlp_DID = did;
 	ndlp->vport = vport;
 	ndlp->phba = vport->phba;
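The lpfc_initialize_node() hunk is the first of several mechanical timer conversions in this merge: setup_timer() packs the old three-statement init_timer()/.function/.data sequence into one call with identical semantics, which is why the same -3/+2 (or -3/+1) shape repeats through the lpfc_init.c hunks below. The transformation in isolation (sketch, names generic):

	/* before */
	init_timer(&t);
	t.function = callback;
	t.data = (unsigned long)ctx;

	/* after: one call, same effect */
	setup_timer(&t, callback, (unsigned long)ctx);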
@@ -4606,9 +4608,9 @@ lpfc_sli4_dequeue_nport_iocbs(struct lpfc_hba *phba,
 		pring = qp->pring;
 		if (!pring)
 			continue;
-		spin_lock_irq(&pring->ring_lock);
+		spin_lock(&pring->ring_lock);
 		__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
-		spin_unlock_irq(&pring->ring_lock);
+		spin_unlock(&pring->ring_lock);
 	}
 	spin_unlock_irq(&phba->hbalock);
 }
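The irq-variant change in lpfc_sli4_dequeue_nport_iocbs() fixes a lock-nesting bug rather than a cosmetic one: the function already holds phba->hbalock taken with spin_lock_irq(), so the inner spin_unlock_irq(&pring->ring_lock) would have switched interrupts back on while hbalock was still held. The corrected nesting, as the hunk leaves it:

	spin_lock_irq(&phba->hbalock);		/* outer lock disables interrupts */
	...
	spin_lock(&pring->ring_lock);		/* inner lock: plain variant */
	__lpfc_dequeue_nport_iocbs(phba, ndlp, pring, dequeue_list);
	spin_unlock(&pring->ring_lock);		/* must not re-enable interrupts */
	...
	spin_unlock_irq(&phba->hbalock);	/* interrupts come back on here only */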
@@ -1001,7 +1001,7 @@ struct eq_delay_info {
 	uint32_t phase;
 	uint32_t delay_multi;
 };
-#define	LPFC_MAX_EQ_DELAY	8
+#define	LPFC_MAX_EQ_DELAY_EQID_CNT	8
 
 struct sgl_page_pairs {
 	uint32_t sgl_pg0_addr_lo;
@@ -1070,7 +1070,7 @@ struct lpfc_mbx_modify_eq_delay {
 	union {
 		struct {
 			uint32_t num_eq;
-			struct eq_delay_info eq[LPFC_MAX_EQ_DELAY];
+			struct eq_delay_info eq[LPFC_MAX_EQ_DELAY_EQID_CNT];
 		} request;
 		struct {
 			uint32_t word0;
@@ -3555,6 +3555,44 @@ lpfc_sli4_scsi_sgl_update(struct lpfc_hba *phba)
 	return rc;
 }
 
+static uint64_t
+lpfc_get_wwpn(struct lpfc_hba *phba)
+{
+	uint64_t wwn;
+	int rc;
+	LPFC_MBOXQ_t *mboxq;
+	MAILBOX_t *mb;
+
+
+	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
+						GFP_KERNEL);
+	if (!mboxq)
+		return (uint64_t)-1;
+
+	/* First get WWN of HBA instance */
+	lpfc_read_nv(phba, mboxq);
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+	if (rc != MBX_SUCCESS) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"6019 Mailbox failed , mbxCmd x%x "
+				"READ_NV, mbxStatus x%x\n",
+				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return (uint64_t) -1;
+	}
+	mb = &mboxq->u.mb;
+	memcpy(&wwn, (char *)mb->un.varRDnvp.portname, sizeof(uint64_t));
+	/* wwn is WWPN of HBA instance */
+	mempool_free(mboxq, phba->mbox_mem_pool);
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		return be64_to_cpu(wwn);
+	else
+		return (((wwn & 0xffffffff00000000) >> 32) |
+			((wwn & 0x00000000ffffffff) << 32));
+
+}
+
 /**
  * lpfc_sli4_nvme_sgl_update - update xri-sgl sizing and mapping
  * @phba: pointer to lpfc hba data structure.
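The tail of the new lpfc_get_wwpn() handles the two SLI generations' byte orderings: SLI-4 returns the port name big-endian, so be64_to_cpu() is enough, while the SLI-3 mailbox layout needs its two 32-bit words exchanged. That word swap as a standalone helper (sketch; the example value is invented):

	#include <stdint.h>

	static uint64_t wwn_swap_words(uint64_t wwn)
	{
		/* exchange the upper and lower 32-bit halves, as the
		 * SLI-3 branch of lpfc_get_wwpn() does */
		return ((wwn & 0xffffffff00000000ULL) >> 32) |
		       ((wwn & 0x00000000ffffffffULL) << 32);
	}

	/* wwn_swap_words(0x1000009012345678) == 0x1234567810000090 */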
@@ -3676,17 +3714,32 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	struct lpfc_vport *vport;
 	struct Scsi_Host  *shost = NULL;
 	int error = 0;
+	int i;
+	uint64_t wwn;
+	bool use_no_reset_hba = false;
+
+	wwn = lpfc_get_wwpn(phba);
+
+	for (i = 0; i < lpfc_no_hba_reset_cnt; i++) {
+		if (wwn == lpfc_no_hba_reset[i]) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+					"6020 Setting use_no_reset port=%llx\n",
+					wwn);
+			use_no_reset_hba = true;
+			break;
+		}
+	}
 
 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_FCP) {
 		if (dev != &phba->pcidev->dev) {
 			shost = scsi_host_alloc(&lpfc_vport_template,
 						sizeof(struct lpfc_vport));
 		} else {
-			if (phba->sli_rev == LPFC_SLI_REV4)
+			if (!use_no_reset_hba)
 				shost = scsi_host_alloc(&lpfc_template,
 						sizeof(struct lpfc_vport));
 			else
-				shost = scsi_host_alloc(&lpfc_template_s3,
+				shost = scsi_host_alloc(&lpfc_template_no_hr,
 						sizeof(struct lpfc_vport));
 		}
 	} else if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
@@ -3734,17 +3787,14 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 	INIT_LIST_HEAD(&vport->rcv_buffer_list);
 	spin_lock_init(&vport->work_port_lock);
 
-	init_timer(&vport->fc_disctmo);
-	vport->fc_disctmo.function = lpfc_disc_timeout;
-	vport->fc_disctmo.data = (unsigned long)vport;
+	setup_timer(&vport->fc_disctmo, lpfc_disc_timeout,
+			(unsigned long)vport);
 
-	init_timer(&vport->els_tmofunc);
-	vport->els_tmofunc.function = lpfc_els_timeout;
-	vport->els_tmofunc.data = (unsigned long)vport;
+	setup_timer(&vport->els_tmofunc, lpfc_els_timeout,
+			(unsigned long)vport);
 
-	init_timer(&vport->delayed_disc_tmo);
-	vport->delayed_disc_tmo.function = lpfc_delayed_disc_tmo;
-	vport->delayed_disc_tmo.data = (unsigned long)vport;
+	setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo,
+			(unsigned long)vport);
 
 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
@@ -5406,21 +5456,15 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
 	INIT_LIST_HEAD(&phba->luns);
 
 	/* MBOX heartbeat timer */
-	init_timer(&psli->mbox_tmo);
-	psli->mbox_tmo.function = lpfc_mbox_timeout;
-	psli->mbox_tmo.data = (unsigned long) phba;
+	setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba);
 	/* Fabric block timer */
-	init_timer(&phba->fabric_block_timer);
-	phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
-	phba->fabric_block_timer.data = (unsigned long) phba;
+	setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout,
+			(unsigned long)phba);
 	/* EA polling mode timer */
-	init_timer(&phba->eratt_poll);
-	phba->eratt_poll.function = lpfc_poll_eratt;
-	phba->eratt_poll.data = (unsigned long) phba;
+	setup_timer(&phba->eratt_poll, lpfc_poll_eratt,
+			(unsigned long)phba);
 	/* Heartbeat timer */
-	init_timer(&phba->hb_tmofunc);
-	phba->hb_tmofunc.function = lpfc_hb_timeout;
-	phba->hb_tmofunc.data = (unsigned long)phba;
+	setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba);
 
 	return 0;
 }
@@ -5446,9 +5490,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 	 */
 
 	/* FCP polling mode timer */
-	init_timer(&phba->fcp_poll_timer);
-	phba->fcp_poll_timer.function = lpfc_poll_timeout;
-	phba->fcp_poll_timer.data = (unsigned long) phba;
+	setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout,
+			(unsigned long)phba);
 
 	/* Host attention work mask setup */
 	phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
@@ -5482,7 +5525,8 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
 
 	/* Initialize the host templates the configured values. */
 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-	lpfc_template_s3.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 
 	/* There are going to be 2 reserved BDEs: 1 FCP cmnd + 1 FCP rsp */
 	if (phba->cfg_enable_bg) {
@@ -5617,14 +5661,11 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	 * Initialize timers used by driver
 	 */
 
-	init_timer(&phba->rrq_tmr);
-	phba->rrq_tmr.function = lpfc_rrq_timeout;
-	phba->rrq_tmr.data = (unsigned long)phba;
+	setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba);
 
 	/* FCF rediscover timer */
-	init_timer(&phba->fcf.redisc_wait);
-	phba->fcf.redisc_wait.function = lpfc_sli4_fcf_redisc_wait_tmo;
-	phba->fcf.redisc_wait.data = (unsigned long)phba;
+	setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo,
+			(unsigned long)phba);
 
 	/*
 	 * Control structure for handling external multi-buffer mailbox
@@ -5706,6 +5747,7 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 	/* Initialize the host templates with the updated values. */
 	lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
 	lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+	lpfc_template_no_hr.sg_tablesize = phba->cfg_sg_seg_cnt;
 
 	if (phba->cfg_sg_dma_buf_size <= LPFC_MIN_SG_SLI4_BUF_SZ)
 		phba->cfg_sg_dma_buf_size = LPFC_MIN_SG_SLI4_BUF_SZ;
@@ -5736,6 +5778,8 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
 		/* Initialize the Abort nvme buffer list used by driver */
 		spin_lock_init(&phba->sli4_hba.abts_nvme_buf_list_lock);
 		INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_nvme_buf_list);
+		/* Fast-path XRI aborted CQ Event work queue list */
+		INIT_LIST_HEAD(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
 	}
 
 	/* This abort list used by worker thread */
@@ -8712,12 +8756,9 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
 		}
 	}
 
-	/*
-	 * Configure EQ delay multipier for interrupt coalescing using
-	 * MODIFY_EQ_DELAY for all EQs created, LPFC_MAX_EQ_DELAY at a time.
-	 */
-	for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY)
+	for (qidx = 0; qidx < io_channel; qidx += LPFC_MAX_EQ_DELAY_EQID_CNT)
 		lpfc_modify_hba_eq_delay(phba, qidx);
 
 	return 0;
 
 out_destroy:
@@ -8973,6 +9014,11 @@ lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
 	/* Pending ELS XRI abort events */
 	list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
 			 &cqelist);
+	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
+		/* Pending NVME XRI abort events */
+		list_splice_init(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+				 &cqelist);
+	}
 	/* Pending asynnc events */
 	list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
 			 &cqelist);
@@ -10400,12 +10446,7 @@ lpfc_pci_remove_one_s3(struct pci_dev *pdev)
 	fc_remove_host(shost);
 	scsi_remove_host(shost);
 
-	/* Perform ndlp cleanup on the physical port.  The nvme and nvmet
-	 * localports are destroyed after to cleanup all transport memory.
-	 */
 	lpfc_cleanup(vport);
-	lpfc_nvmet_destroy_targetport(phba);
-	lpfc_nvme_destroy_localport(vport);
 
 	/*
 	 * Bring down the SLI Layer. This step disable all interrupts,
@@ -646,7 +646,6 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 	}
 
 	dma_buf->iocbq = lpfc_sli_get_iocbq(phba);
-	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
 	if (!dma_buf->iocbq) {
 		kfree(dma_buf->context);
 		pci_pool_free(phba->lpfc_drb_pool, dma_buf->dbuf.virt,
@@ -658,6 +657,7 @@ lpfc_sli4_nvmet_alloc(struct lpfc_hba *phba)
 				"2621 Ran out of nvmet iocb/WQEs\n");
 		return NULL;
 	}
+	dma_buf->iocbq->iocb_flag = LPFC_IO_NVMET;
 	nvmewqe = dma_buf->iocbq;
 	wqe = (union lpfc_wqe128 *)&nvmewqe->wqe;
 	/* Initialize WQE */
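The two lpfc_sli4_nvmet_alloc() hunks together fix a use-before-check bug: the old code wrote dma_buf->iocbq->iocb_flag before testing whether lpfc_sli_get_iocbq() had returned NULL, so an allocation failure meant a NULL dereference instead of the error path. The corrected ordering, reduced to its shape (sketch):

	buf->iocbq = lpfc_sli_get_iocbq(phba);
	if (!buf->iocbq) {
		/* clean up partial allocations and bail out
		 * before any dereference of buf->iocbq */
		return NULL;
	}
	buf->iocbq->iocb_flag = LPFC_IO_NVMET;	/* safe only past the check */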
@@ -316,7 +316,7 @@ lpfc_nvme_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 	bf_set(wqe_dfctl, &wqe->gen_req.wge_ctl, 0);
 	bf_set(wqe_si, &wqe->gen_req.wge_ctl, 1);
 	bf_set(wqe_la, &wqe->gen_req.wge_ctl, 1);
-	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
+	bf_set(wqe_rctl, &wqe->gen_req.wge_ctl, FC_RCTL_ELS4_REQ);
 	bf_set(wqe_type, &wqe->gen_req.wge_ctl, FC_TYPE_NVME);
 
 	/* Word 6 */
@@ -620,15 +620,15 @@ lpfc_nvme_adj_fcp_sgls(struct lpfc_vport *vport,
 	 * Embed the payload in the last half of the WQE
 	 * WQE words 16-30 get the NVME CMD IU payload
 	 *
-	 * WQE Word 16 is already setup with flags
-	 * WQE words 17-19 get payload Words 2-4
+	 * WQE words 16-19 get payload Words 1-4
 	 * WQE words 20-21 get payload Words 6-7
 	 * WQE words 22-29 get payload Words 16-23
 	 */
-	wptr = &wqe->words[17];  /* WQE ptr */
+	wptr = &wqe->words[16];  /* WQE ptr */
 	dptr = (uint32_t *)nCmd->cmdaddr;  /* payload ptr */
-	dptr += 2;		/* Skip Words 0-1 in payload */
+	dptr++;			/* Skip Word 0 in payload */
 
+	*wptr++ = *dptr++;	/* Word 1 */
 	*wptr++ = *dptr++;	/* Word 2 */
 	*wptr++ = *dptr++;	/* Word 3 */
 	*wptr++ = *dptr++;	/* Word 4 */
@@ -978,9 +978,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
 			       NVME_WRITE_CMD);
 
-			/* Word 16 */
-			wqe->words[16] = LPFC_NVME_EMBED_WRITE;
-
 			phba->fc4NvmeOutputRequests++;
 		} else {
 			/* Word 7 */
@@ -1002,9 +999,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 			bf_set(wqe_cmd_type, &wqe->generic.wqe_com,
 			       NVME_READ_CMD);
 
-			/* Word 16 */
-			wqe->words[16] = LPFC_NVME_EMBED_READ;
-
 			phba->fc4NvmeInputRequests++;
 		}
 	} else {
@@ -1026,9 +1020,6 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport,
 		/* Word 11 */
 		bf_set(wqe_cmd_type, &wqe->generic.wqe_com, NVME_READ_CMD);
 
-		/* Word 16 */
-		wqe->words[16] = LPFC_NVME_EMBED_CMD;
-
 		phba->fc4NvmeControlRequests++;
 	}
 	/*
@@ -1286,6 +1277,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 	pnvme_fcreq->private = (void *)lpfc_ncmd;
 	lpfc_ncmd->nvmeCmd = pnvme_fcreq;
 	lpfc_ncmd->nrport = rport;
+	lpfc_ncmd->ndlp = ndlp;
 	lpfc_ncmd->start_time = jiffies;
 
 	lpfc_nvme_prep_io_cmd(vport, lpfc_ncmd, ndlp);
@@ -1319,7 +1311,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport,
 				 "sid: x%x did: x%x oxid: x%x\n",
 				 ret, vport->fc_myDID, ndlp->nlp_DID,
 				 lpfc_ncmd->cur_iocbq.sli4_xritag);
-		ret = -EINVAL;
+		ret = -EBUSY;
 		goto out_free_nvme_buf;
 	}
 
@@ -1821,10 +1813,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
 				pdma_phys_sgl1, cur_xritag);
 			if (status) {
 				/* failure, put on abort nvme list */
-				lpfc_ncmd->exch_busy = 1;
+				lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
 			} else {
 				/* success, put on NVME buffer list */
-				lpfc_ncmd->exch_busy = 0;
+				lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
 				lpfc_ncmd->status = IOSTAT_SUCCESS;
 				num_posted++;
 			}
@@ -1854,10 +1846,10 @@ lpfc_post_nvme_sgl_list(struct lpfc_hba *phba,
 					 struct lpfc_nvme_buf, list);
 		if (status) {
 			/* failure, put on abort nvme list */
-			lpfc_ncmd->exch_busy = 1;
+			lpfc_ncmd->flags |= LPFC_SBUF_XBUSY;
 		} else {
 			/* success, put on NVME buffer list */
-			lpfc_ncmd->exch_busy = 0;
+			lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
 			lpfc_ncmd->status = IOSTAT_SUCCESS;
 			num_posted++;
 		}
@@ -2099,7 +2091,7 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 	unsigned long iflag = 0;
 
 	lpfc_ncmd->nonsg_phys = 0;
-	if (lpfc_ncmd->exch_busy) {
+	if (lpfc_ncmd->flags & LPFC_SBUF_XBUSY) {
 		spin_lock_irqsave(&phba->sli4_hba.abts_nvme_buf_list_lock,
 				  iflag);
 		lpfc_ncmd->nvmeCmd = NULL;
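Folding exch_busy into the buffer's flags word lets one field track several independent buffer states with the usual three bitmask operations, which is all the exch_busy hunks above amount to (sketch; the flag's numeric value is illustrative, the name is the driver's):

	#define LPFC_SBUF_XBUSY	(1 << 0)	/* exchange still busy in hardware */

	buf->flags |= LPFC_SBUF_XBUSY;		/* set: park on the aborted list */
	buf->flags &= ~LPFC_SBUF_XBUSY;		/* clear: exchange fully released */
	if (buf->flags & LPFC_SBUF_XBUSY)	/* test: not yet safe to reuse */
		return;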
@@ -2135,11 +2127,12 @@ lpfc_release_nvme_buf(struct lpfc_hba *phba, struct lpfc_nvme_buf *lpfc_ncmd)
 int
 lpfc_nvme_create_localport(struct lpfc_vport *vport)
 {
+	int ret = 0;
 	struct lpfc_hba *phba = vport->phba;
 	struct nvme_fc_port_info nfcp_info;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
-	int len, ret = 0;
+	int len;
 
 	/* Initialize this localport instance.  The vport wwn usage ensures
 	 * that NPIV is accounted for.
@@ -2156,8 +2149,12 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 	/* localport is allocated from the stack, but the registration
 	 * call allocates heap memory as well as the private area.
 	 */
+#ifdef CONFIG_LPFC_NVME_INITIATOR
 	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
 					 &vport->phba->pcidev->dev, &localport);
+#else
+	ret = -ENOMEM;
+#endif
 	if (!ret) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME | LOG_NVME_DISC,
 				 "6005 Successfully registered local "
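These #ifdef CONFIG_LPFC_NVME_INITIATOR blocks are the other half of the Kconfig hunk at the top of the merge: with initiator support compiled out, the transport registration collapses to a stub error assignment, so every caller simply takes its existing failure path and no NVME initiator state is ever set up. The gating pattern in isolation (sketch):

	#ifdef CONFIG_LPFC_NVME_INITIATOR
	ret = nvme_fc_register_localport(&nfcp_info, &lpfc_nvme_template,
					 &vport->phba->pcidev->dev, &localport);
	#else
	ret = -ENOMEM;	/* support compiled out: behave like a failed
			 * registration */
	#endif
	if (!ret) {
		/* reached only when the transport really registered */
	}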
@@ -2173,10 +2170,10 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 		lport->vport = vport;
 		INIT_LIST_HEAD(&lport->rport_list);
 		vport->nvmei_support = 1;
+		len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
+		vport->phba->total_nvme_bufs += len;
 	}
 
-	len = lpfc_new_nvme_buf(vport, phba->sli4_hba.nvme_xri_max);
-	vport->phba->total_nvme_bufs += len;
 	return ret;
 }
 
@@ -2193,6 +2190,7 @@ lpfc_nvme_create_localport(struct lpfc_vport *vport)
 void
 lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 {
+#ifdef CONFIG_LPFC_NVME_INITIATOR
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
 	struct lpfc_nvme_rport *rport = NULL, *rport_next = NULL;
@@ -2208,7 +2206,6 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 	lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME,
 			 "6011 Destroying NVME localport %p\n",
 			 localport);
-
 	list_for_each_entry_safe(rport, rport_next, &lport->rport_list, list) {
 		/* The last node ref has to get released now before the rport
 		 * private memory area is released by the transport.
@@ -2222,6 +2219,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 				 "6008 rport fail destroy %x\n", ret);
 		wait_for_completion_timeout(&rport->rport_unreg_done, 5);
 	}
+
 	/* lport's rport list is clear.  Unregister
 	 * lport and release resources.
 	 */
@@ -2245,6 +2243,7 @@ lpfc_nvme_destroy_localport(struct lpfc_vport *vport)
 				 "Failed, status x%x\n",
 				 ret);
 	}
+#endif
 }
 
 void
@@ -2275,6 +2274,7 @@ lpfc_nvme_update_localport(struct lpfc_vport *vport)
 int
 lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
+#ifdef CONFIG_LPFC_NVME_INITIATOR
 	int ret = 0;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
@@ -2348,7 +2348,6 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 	rpinfo.port_role |= FC_PORT_ROLE_NVME_INITIATOR;
 	rpinfo.port_name = wwn_to_u64(ndlp->nlp_portname.u.wwn);
 	rpinfo.node_name = wwn_to_u64(ndlp->nlp_nodename.u.wwn);
-
 	ret = nvme_fc_register_remoteport(localport, &rpinfo,
 					  &remote_port);
 	if (!ret) {
@@ -2384,6 +2383,9 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 			 ndlp->nlp_type, ndlp->nlp_DID, ndlp);
 	}
 	return ret;
+#else
+	return 0;
+#endif
 }
 
 /* lpfc_nvme_unregister_port - unbind the DID and port_role from this rport.
@@ -2401,6 +2403,7 @@ lpfc_nvme_register_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 void
 lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
+#ifdef CONFIG_LPFC_NVME_INITIATOR
 	int ret;
 	struct nvme_fc_local_port *localport;
 	struct lpfc_nvme_lport *lport;
|
||||||
return;
|
return;
|
||||||
|
|
||||||
input_err:
|
input_err:
|
||||||
|
#endif
|
||||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
|
lpfc_printf_vlog(vport, KERN_ERR, LOG_NVME_DISC,
|
||||||
"6168: State error: lport %p, rport%p FCID x%06x\n",
|
"6168: State error: lport %p, rport%p FCID x%06x\n",
|
||||||
vport->localport, ndlp->rport, ndlp->nlp_DID);
|
vport->localport, ndlp->rport, ndlp->nlp_DID);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* lpfc_sli4_nvme_xri_aborted - Fast-path process of NVME xri abort
|
||||||
|
* @phba: pointer to lpfc hba data structure.
|
||||||
|
* @axri: pointer to the fcp xri abort wcqe structure.
|
||||||
|
*
|
||||||
|
* This routine is invoked by the worker thread to process a SLI4 fast-path
|
||||||
|
* FCP aborted xri.
|
||||||
|
**/
|
||||||
|
void
|
||||||
|
lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
|
||||||
|
struct sli4_wcqe_xri_aborted *axri)
|
||||||
|
{
|
||||||
|
uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
|
||||||
|
uint16_t rxid = bf_get(lpfc_wcqe_xa_remote_xid, axri);
|
||||||
|
struct lpfc_nvme_buf *lpfc_ncmd, *next_lpfc_ncmd;
|
||||||
|
struct lpfc_nodelist *ndlp;
|
||||||
|
unsigned long iflag = 0;
|
||||||
|
int rrq_empty = 0;
|
||||||
|
|
||||||
|
if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME))
|
||||||
|
return;
|
||||||
|
spin_lock_irqsave(&phba->hbalock, iflag);
|
||||||
|
spin_lock(&phba->sli4_hba.abts_nvme_buf_list_lock);
|
||||||
|
list_for_each_entry_safe(lpfc_ncmd, next_lpfc_ncmd,
|
||||||
|
&phba->sli4_hba.lpfc_abts_nvme_buf_list,
|
||||||
|
list) {
|
||||||
|
if (lpfc_ncmd->cur_iocbq.sli4_xritag == xri) {
|
||||||
|
list_del(&lpfc_ncmd->list);
|
||||||
|
lpfc_ncmd->flags &= ~LPFC_SBUF_XBUSY;
|
||||||
|
lpfc_ncmd->status = IOSTAT_SUCCESS;
|
||||||
|
spin_unlock(
|
||||||
|
&phba->sli4_hba.abts_nvme_buf_list_lock);
|
||||||
|
|
||||||
|
rrq_empty = list_empty(&phba->active_rrq_list);
|
||||||
|
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
||||||
|
ndlp = lpfc_ncmd->ndlp;
|
||||||
|
if (ndlp) {
|
||||||
|
lpfc_set_rrq_active(
|
||||||
|
phba, ndlp,
|
||||||
|
lpfc_ncmd->cur_iocbq.sli4_lxritag,
|
||||||
|
rxid, 1);
|
||||||
|
lpfc_sli4_abts_err_handler(phba, ndlp, axri);
|
||||||
|
}
|
||||||
|
lpfc_release_nvme_buf(phba, lpfc_ncmd);
|
||||||
|
if (rrq_empty)
|
||||||
|
lpfc_worker_wake_up(phba);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
spin_unlock(&phba->sli4_hba.abts_nvme_buf_list_lock);
|
||||||
|
spin_unlock_irqrestore(&phba->hbalock, iflag);
|
||||||
|
}
|
||||||
|
|
|
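The new lpfc_sli4_nvme_xri_aborted() above also demonstrates the standard way to search a lock-protected list when a match needs heavyweight handling: iterate with list_for_each_entry_safe(), unlink the match while still locked, drop the locks, do the work, and return instead of resuming the now-stale iteration. Its control flow, skeletonized (sketch; inner_lock stands in for the abts buffer list lock):

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&inner_lock);
	list_for_each_entry_safe(buf, next, &abts_list, list) {
		if (buf->cur_iocbq.sli4_xritag != xri)
			continue;
		list_del(&buf->list);		/* unlink under both locks */
		spin_unlock(&inner_lock);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		/* slow-path cleanup runs unlocked */
		return;				/* never resume the walk */
	}
	spin_unlock(&inner_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);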
@@ -57,6 +57,7 @@ struct lpfc_nvme_buf {
 	struct list_head list;
 	struct nvmefc_fcp_req *nvmeCmd;
 	struct lpfc_nvme_rport *nrport;
+	struct lpfc_nodelist *ndlp;
 
 	uint32_t timeout;
 
@@ -571,6 +571,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6102 Bad state IO x%x aborted\n",
 				ctxp->oxid);
+		rc = -ENXIO;
 		goto aerr;
 	}
 
@@ -580,6 +581,7 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
 				"6152 FCP Drop IO x%x: Prep\n",
 				ctxp->oxid);
+		rc = -ENXIO;
 		goto aerr;
 	}
 
@@ -618,8 +620,9 @@ lpfc_nvmet_xmt_fcp_op(struct nvmet_fc_target_port *tgtport,
 	ctxp->wqeq->hba_wqidx = 0;
 	nvmewqeq->context2 = NULL;
 	nvmewqeq->context3 = NULL;
+	rc = -EBUSY;
 aerr:
-	return -ENXIO;
+	return rc;
 }
 
 static void
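The three lpfc_nvmet_xmt_fcp_op() hunks convert the hard-coded "return -ENXIO" at the aerr: label into a propagated rc, so a full work queue can now report -EBUSY (retryable by the NVME target transport) while the genuinely fatal paths keep -ENXIO. The resulting control flow, reduced to a skeleton (sketch):

	int rc;

	if (bad_state) {
		rc = -ENXIO;	/* fatal: drop the IO */
		goto aerr;
	}
	if (prep_failed) {
		rc = -ENXIO;	/* fatal: drop the IO */
		goto aerr;
	}
	/* ... WQE issue failed: transient queue-full condition ... */
	rc = -EBUSY;		/* retryable: caller may resubmit */
aerr:
	return rc;		/* caller now sees the distinct reason */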
@@ -668,9 +671,13 @@ lpfc_nvmet_create_targetport(struct lpfc_hba *phba)
 	lpfc_tgttemplate.target_features = NVMET_FCTGTFEAT_READDATA_RSP |
 					   NVMET_FCTGTFEAT_NEEDS_CMD_CPUSCHED;
 
+#ifdef CONFIG_LPFC_NVME_TARGET
 	error = nvmet_fc_register_targetport(&pinfo, &lpfc_tgttemplate,
 					     &phba->pcidev->dev,
 					     &phba->targetport);
+#else
+	error = -ENOMEM;
+#endif
 	if (error) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
 				"6025 Cannot register NVME targetport "
@@ -731,9 +738,25 @@ lpfc_nvmet_update_targetport(struct lpfc_hba *phba)
 	return 0;
 }
 
+/**
+ * lpfc_sli4_nvmet_xri_aborted - Fast-path process of nvmet xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the nvmet xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * NVMET aborted xri.
+ **/
+void
+lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+			    struct sli4_wcqe_xri_aborted *axri)
+{
+	/* TODO: work in progress */
+}
+
 void
 lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 {
+#ifdef CONFIG_LPFC_NVME_TARGET
 	struct lpfc_nvmet_tgtport *tgtp;
 
 	if (phba->nvmet_support == 0)
@@ -745,6 +768,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 		wait_for_completion_timeout(&tgtp->tport_unreg_done, 5);
 	}
 	phba->targetport = NULL;
+#endif
 }
 
 /**
@@ -764,6 +788,7 @@ static void
 lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			   struct hbq_dmabuf *nvmebuf)
 {
+#ifdef CONFIG_LPFC_NVME_TARGET
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
 	struct lpfc_nvmet_rcv_ctx *ctxp;
@@ -844,6 +869,7 @@ lpfc_nvmet_unsol_ls_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 	atomic_inc(&tgtp->xmt_ls_abort);
 	lpfc_nvmet_unsol_ls_issue_abort(phba, ctxp, sid, oxid);
+#endif
 }
 
 /**
@@ -865,6 +891,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 			    struct rqb_dmabuf *nvmebuf,
 			    uint64_t isr_timestamp)
 {
+#ifdef CONFIG_LPFC_NVME_TARGET
 	struct lpfc_nvmet_rcv_ctx *ctxp;
 	struct lpfc_nvmet_tgtport *tgtp;
 	struct fc_frame_header *fc_hdr;
@@ -955,7 +982,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 
 	atomic_inc(&tgtp->rcv_fcp_cmd_drop);
 	lpfc_printf_log(phba, KERN_ERR, LOG_NVME_IOERR,
-			"6159 FCP Drop IO x%x: nvmet_fc_rcv_fcp_req x%x\n",
+			"6159 FCP Drop IO x%x: err x%x\n",
 			ctxp->oxid, rc);
 dropit:
 	lpfc_nvmeio_data(phba, "NVMET FCP DROP: xri x%x sz %d from %06x\n",
@@ -970,6 +997,7 @@ lpfc_nvmet_unsol_fcp_buffer(struct lpfc_hba *phba,
 	/* We assume a rcv'ed cmd ALWAYs fits into 1 buffer */
 	lpfc_nvmet_rq_post(phba, NULL, &nvmebuf->hbuf);
 	}
+#endif
 }
 
 /**
@@ -1114,7 +1142,7 @@ lpfc_nvmet_prep_ls_wqe(struct lpfc_hba *phba,
 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, 1);
 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 0);
-	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_DD_SOL_CTL);
+	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, FC_RCTL_ELS4_REP);
 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_NVME);
 
 	/* Word 6 */
@@ -1445,7 +1473,6 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba,
 
 	case NVMET_FCOP_RSP:
 		/* Words 0 - 2 */
-		sgel = &rsp->sg[0];
 		physaddr = rsp->rspdma;
 		wqe->fcp_trsp.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 		wqe->fcp_trsp.bde.tus.f.bdeSize = rsp->rsplen;
@@ -1681,8 +1708,8 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
|
||||||
struct lpfc_nodelist *ndlp;
|
struct lpfc_nodelist *ndlp;
|
||||||
|
|
||||||
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
lpfc_printf_log(phba, KERN_INFO, LOG_NVME_ABTS,
|
||||||
"6067 %s: Entrypoint: sid %x xri %x\n", __func__,
|
"6067 Abort: sid %x xri x%x/x%x\n",
|
||||||
sid, xri);
|
sid, xri, ctxp->wqeq->sli4_xritag);
|
||||||
|
|
||||||
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
tgtp = (struct lpfc_nvmet_tgtport *)phba->targetport->private;
|
||||||
|
|
||||||
|
@ -1693,7 +1720,7 @@ lpfc_nvmet_unsol_issue_abort(struct lpfc_hba *phba,
|
||||||
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
||||||
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
|
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
|
||||||
"6134 Drop ABTS - wrong NDLP state x%x.\n",
|
"6134 Drop ABTS - wrong NDLP state x%x.\n",
|
||||||
ndlp->nlp_state);
|
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
|
||||||
|
|
||||||
/* No failure to an ABTS request. */
|
/* No failure to an ABTS request. */
|
||||||
return 0;
|
return 0;
|
||||||
|
@ -1791,7 +1818,7 @@ lpfc_nvmet_sol_fcp_issue_abort(struct lpfc_hba *phba,
|
||||||
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
atomic_inc(&tgtp->xmt_abort_rsp_error);
|
||||||
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
|
lpfc_printf_log(phba, KERN_WARNING, LOG_NVME_ABTS,
|
||||||
"6160 Drop ABTS - wrong NDLP state x%x.\n",
|
"6160 Drop ABTS - wrong NDLP state x%x.\n",
|
||||||
ndlp->nlp_state);
|
(ndlp) ? ndlp->nlp_state : NLP_STE_MAX_STATE);
|
||||||
|
|
||||||
/* No failure to an ABTS request. */
|
/* No failure to an ABTS request. */
|
||||||
return 0;
|
return 0;
|
||||||
|
|
|
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5953,12 +5953,13 @@ struct scsi_host_template lpfc_template_nvme = {
 	.track_queue_depth	= 0,
 };
 
-struct scsi_host_template lpfc_template_s3 = {
+struct scsi_host_template lpfc_template_no_hr = {
 	.module			= THIS_MODULE,
 	.name			= LPFC_DRIVER_NAME,
 	.proc_name		= LPFC_DRIVER_NAME,
 	.info			= lpfc_info,
 	.queuecommand		= lpfc_queuecommand,
+	.eh_timed_out		= fc_eh_timed_out,
 	.eh_abort_handler	= lpfc_abort_handler,
 	.eh_device_reset_handler = lpfc_device_reset_handler,
 	.eh_target_reset_handler = lpfc_target_reset_handler,
@@ -6015,7 +6016,6 @@ struct scsi_host_template lpfc_vport_template = {
 	.eh_abort_handler	= lpfc_abort_handler,
 	.eh_device_reset_handler = lpfc_device_reset_handler,
 	.eh_target_reset_handler = lpfc_target_reset_handler,
-	.eh_bus_reset_handler	= lpfc_bus_reset_handler,
 	.slave_alloc		= lpfc_slave_alloc,
 	.slave_configure	= lpfc_slave_configure,
 	.slave_destroy		= lpfc_slave_destroy,
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -1,3 +1,4 @@
+
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
@@ -952,7 +953,7 @@ __lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
 	start_sglq = sglq;
 	while (!found) {
 		if (!sglq)
-			return NULL;
+			break;
 		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
 		    test_bit(sglq->sli4_lxritag,
 			     ndlp->active_rrqs_xri_bitmap)) {
@@ -12212,6 +12213,41 @@ void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
 	}
 }
 
+/**
+ * lpfc_sli4_nvme_xri_abort_event_proc - Process nvme xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 NVME abort XRI events.
+ **/
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+	struct lpfc_cq_event *cq_event;
+
+	/* First, declare the fcp xri abort event has been handled */
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~NVME_XRI_ABORT_EVENT;
+	spin_unlock_irq(&phba->hbalock);
+	/* Now, handle all the fcp xri abort events */
+	while (!list_empty(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue)) {
+		/* Get the first event from the head of the event queue */
+		spin_lock_irq(&phba->hbalock);
+		list_remove_head(&phba->sli4_hba.sp_nvme_xri_aborted_work_queue,
+				 cq_event, struct lpfc_cq_event, list);
+		spin_unlock_irq(&phba->hbalock);
+		/* Notify aborted XRI for NVME work queue */
+		if (phba->nvmet_support) {
+			lpfc_sli4_nvmet_xri_aborted(phba,
+						    &cq_event->cqe.wcqe_axri);
+		} else {
+			lpfc_sli4_nvme_xri_aborted(phba,
+						   &cq_event->cqe.wcqe_axri);
+		}
+		/* Free the event processed back to the free pool */
+		lpfc_sli4_cq_event_release(phba, cq_event);
+	}
+}
+
 /**
  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
  * @phba: pointer to lpfc hba data structure.
@@ -12709,10 +12745,22 @@ lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
 		spin_unlock_irqrestore(&phba->hbalock, iflags);
 		workposted = true;
 		break;
+	case LPFC_NVME:
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		list_add_tail(&cq_event->list,
+			      &phba->sli4_hba.sp_nvme_xri_aborted_work_queue);
+		/* Set the nvme xri abort event flag */
+		phba->hba_flag |= NVME_XRI_ABORT_EVENT;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		workposted = true;
+		break;
 	default:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-				"0603 Invalid work queue CQE subtype (x%x)\n",
-				cq->subtype);
+				"0603 Invalid CQ subtype %d: "
+				"%08x %08x %08x %08x\n",
+				cq->subtype, wcqe->word0, wcqe->parameter,
+				wcqe->word2, wcqe->word3);
+		lpfc_sli4_cq_event_release(phba, cq_event);
 		workposted = false;
 		break;
 	}
@@ -13827,6 +13875,8 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
  * @startq: The starting FCP EQ to modify
  *
  * This function sends an MODIFY_EQ_DELAY mailbox command to the HBA.
+ * The command allows up to LPFC_MAX_EQ_DELAY_EQID_CNT EQ ID's to be
+ * updated in one mailbox command.
  *
  * The @phba struct is used to send mailbox command to HBA. The @startq
  * is used to get the starting FCP EQ to change.
@@ -13879,7 +13929,7 @@ lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq)
 		eq_delay->u.request.eq[cnt].phase = 0;
 		eq_delay->u.request.eq[cnt].delay_multi = dmult;
 		cnt++;
-		if (cnt >= LPFC_MAX_EQ_DELAY)
+		if (cnt >= LPFC_MAX_EQ_DELAY_EQID_CNT)
 			break;
 	}
 	eq_delay->u.request.num_eq = cnt;
@@ -15185,17 +15235,17 @@ lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
 		drq = drqp[idx];
 		cq = cqp[idx];
 
-		if (hrq->entry_count != drq->entry_count) {
-			status = -EINVAL;
-			goto out;
-		}
-
 		/* sanity check on queue memory */
 		if (!hrq || !drq || !cq) {
 			status = -ENODEV;
 			goto out;
 		}
 
+		if (hrq->entry_count != drq->entry_count) {
+			status = -EINVAL;
+			goto out;
+		}
+
 		if (idx == 0) {
 			bf_set(lpfc_mbx_rq_create_num_pages,
 			       &rq_create->u.request,
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -642,6 +642,7 @@ struct lpfc_sli4_hba {
 	struct list_head sp_asynce_work_queue;
 	struct list_head sp_fcp_xri_aborted_work_queue;
 	struct list_head sp_els_xri_aborted_work_queue;
+	struct list_head sp_nvme_xri_aborted_work_queue;
 	struct list_head sp_unsol_work_queue;
 	struct lpfc_sli4_link link_state;
 	struct lpfc_sli4_lnk_info lnk_info;
@@ -794,9 +795,14 @@ void lpfc_sli4_fcf_redisc_event_proc(struct lpfc_hba *);
 int lpfc_sli4_resume_rpi(struct lpfc_nodelist *,
			void (*)(struct lpfc_hba *, LPFC_MBOXQ_t *), void *);
 void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_nvme_xri_abort_event_proc(struct lpfc_hba *phba);
 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
 void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
			       struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_nvme_xri_aborted(struct lpfc_hba *phba,
+				struct sli4_wcqe_xri_aborted *axri);
+void lpfc_sli4_nvmet_xri_aborted(struct lpfc_hba *phba,
+				 struct sli4_wcqe_xri_aborted *axri);
 void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
			       struct sli4_wcqe_xri_aborted *);
 void lpfc_sli4_vport_delete_els_xri_aborted(struct lpfc_vport *);
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -20,7 +20,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "11.2.0.7"
+#define LPFC_DRIVER_VERSION "11.2.0.10"
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -1442,9 +1442,6 @@ void mpt3sas_transport_update_links(struct MPT3SAS_ADAPTER *ioc,
	u64 sas_address, u16 handle, u8 phy_number, u8 link_rate);
 extern struct sas_function_template mpt3sas_transport_functions;
 extern struct scsi_transport_template *mpt3sas_transport_template;
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
-	enum scsi_device_state new_state);
 /* trigger data externs */
 void mpt3sas_send_trigger_data_event(struct MPT3SAS_ADAPTER *ioc,
	struct SL_WH_TRIGGERS_EVENT_DATA_T *event_data);
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2859,7 +2859,7 @@ _scsih_internal_device_block(struct scsi_device *sdev,
	    sas_device_priv_data->sas_target->handle);
	sas_device_priv_data->block = 1;
 
-	r = scsi_internal_device_block(sdev);
+	r = scsi_internal_device_block(sdev, false);
	if (r == -EINVAL)
		sdev_printk(KERN_WARNING, sdev,
		    "device_block failed with return(%d) for handle(0x%04x)\n",
@@ -2895,7 +2895,7 @@ _scsih_internal_device_unblock(struct scsi_device *sdev,
		    "performing a block followed by an unblock\n",
		    r, sas_device_priv_data->sas_target->handle);
		sas_device_priv_data->block = 1;
-		r = scsi_internal_device_block(sdev);
+		r = scsi_internal_device_block(sdev, false);
		if (r)
			sdev_printk(KERN_WARNING, sdev, "retried device_block "
			    "failed with return(%d) for handle(0x%04x)\n",
@@ -4677,7 +4677,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
	struct MPT3SAS_DEVICE *sas_device_priv_data;
	u32 response_code = 0;
	unsigned long flags;
-	unsigned int sector_sz;
 
	mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
 
@@ -4742,20 +4741,6 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
	}
 
	xfer_cnt = le32_to_cpu(mpi_reply->TransferCount);
-
-	/* In case of bogus fw or device, we could end up having
-	 * unaligned partial completion. We can force alignment here,
-	 * then scsi-ml does not need to handle this misbehavior.
-	 */
-	sector_sz = scmd->device->sector_size;
-	if (unlikely(!blk_rq_is_passthrough(scmd->request) && sector_sz &&
-		     xfer_cnt % sector_sz)) {
-		sdev_printk(KERN_INFO, scmd->device,
-		    "unaligned partial completion avoided (xfer_cnt=%u, sector_sz=%u)\n",
-			    xfer_cnt, sector_sz);
-		xfer_cnt = round_down(xfer_cnt, sector_sz);
-	}
-
	scsi_set_resid(scmd, scsi_bufflen(scmd) - xfer_cnt);
	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
		log_info = le32_to_cpu(mpi_reply->IOCLogInfo);
--- a/drivers/scsi/qedf/qedf_dbg.h
+++ b/drivers/scsi/qedf/qedf_dbg.h
@@ -81,14 +81,17 @@ struct qedf_dbg_ctx {
 #define QEDF_INFO(pdev, level, fmt, ...)	\
		qedf_dbg_info(pdev, __func__, __LINE__, level, fmt,	\
			      ## __VA_ARGS__)
-extern void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_err(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
			  const char *fmt, ...);
-extern void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(4, 5)
+void qedf_dbg_warn(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
			   const char *, ...);
-extern void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
+__printf(4, 5)
+void qedf_dbg_notice(struct qedf_dbg_ctx *qedf, const char *func,
			    u32 line, const char *, ...);
-extern void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
+__printf(5, 6)
+void qedf_dbg_info(struct qedf_dbg_ctx *qedf, const char *func, u32 line,
		      u32 info, const char *fmt, ...);
 
 /* GRC Dump related defines */
--- a/drivers/scsi/qedf/qedf_fip.c
+++ b/drivers/scsi/qedf/qedf_fip.c
@@ -203,7 +203,7 @@ void qedf_fip_recv(struct qedf_ctx *qedf, struct sk_buff *skb)
		case FIP_DT_MAC:
			mp = (struct fip_mac_desc *)desc;
			QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_LL2,
-			    "fd_mac=%pM.\n", __func__, mp->fd_mac);
+			    "fd_mac=%pM\n", mp->fd_mac);
			ether_addr_copy(cvl_mac, mp->fd_mac);
			break;
		case FIP_DT_NAME:
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1342,7 +1342,7 @@ void qedf_scsi_completion(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
	} else {
		refcount = kref_read(&io_req->refcount);
		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO,
-		    "%d:0:%d:%d xid=0x%0x op=0x%02x "
+		    "%d:0:%d:%lld xid=0x%0x op=0x%02x "
		    "lba=%02x%02x%02x%02x cdb_status=%d "
		    "fcp_resid=0x%x refcount=%d.\n",
		    qedf->lport->host->host_no, sc_cmd->device->id,
@@ -1426,7 +1426,7 @@ void qedf_scsi_done(struct qedf_ctx *qedf, struct qedf_ioreq *io_req,
 
	sc_cmd->result = result << 16;
	refcount = kref_read(&io_req->refcount);
-	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%d: Completing "
+	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_IO, "%d:0:%d:%lld: Completing "
	    "sc_cmd=%p result=0x%08x op=0x%02x lba=0x%02x%02x%02x%02x, "
	    "allowed=%d retries=%d refcount=%d.\n",
	    qedf->lport->host->host_no, sc_cmd->device->id,
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -2456,8 +2456,8 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
	}
 
	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
-	    "BDQ PBL addr=0x%p dma=0x%llx.\n", qedf->bdq_pbl,
-	    qedf->bdq_pbl_dma);
+	    "BDQ PBL addr=0x%p dma=%pad\n",
+	    qedf->bdq_pbl, &qedf->bdq_pbl_dma);
 
	/*
	 * Populate BDQ PBL with physical and virtual address of individual
--- a/drivers/scsi/qedi/qedi_debugfs.c
+++ b/drivers/scsi/qedi/qedi_debugfs.c
@@ -14,7 +14,7 @@
 #include <linux/debugfs.h>
 #include <linux/module.h>
 
-int do_not_recover;
+int qedi_do_not_recover;
 static struct dentry *qedi_dbg_root;
 
 void
@@ -74,22 +74,22 @@ qedi_dbg_exit(void)
 static ssize_t
 qedi_dbg_do_not_recover_enable(struct qedi_dbg_ctx *qedi_dbg)
 {
-	if (!do_not_recover)
-		do_not_recover = 1;
+	if (!qedi_do_not_recover)
+		qedi_do_not_recover = 1;
 
	QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
-		  do_not_recover);
+		  qedi_do_not_recover);
	return 0;
 }
 
 static ssize_t
 qedi_dbg_do_not_recover_disable(struct qedi_dbg_ctx *qedi_dbg)
 {
-	if (do_not_recover)
-		do_not_recover = 0;
+	if (qedi_do_not_recover)
+		qedi_do_not_recover = 0;
 
	QEDI_INFO(qedi_dbg, QEDI_LOG_DEBUGFS, "do_not_recover=%d\n",
-		  do_not_recover);
+		  qedi_do_not_recover);
	return 0;
 }
 
@@ -141,7 +141,7 @@ qedi_dbg_do_not_recover_cmd_read(struct file *filp, char __user *buffer,
	if (*ppos)
		return 0;
 
-	cnt = sprintf(buffer, "do_not_recover=%d\n", do_not_recover);
+	cnt = sprintf(buffer, "do_not_recover=%d\n", qedi_do_not_recover);
	cnt = min_t(int, count, cnt - *ppos);
	*ppos += cnt;
	return cnt;
--- a/drivers/scsi/qedi/qedi_fw.c
+++ b/drivers/scsi/qedi/qedi_fw.c
@@ -1461,9 +1461,9 @@ static void qedi_tmf_work(struct work_struct *work)
		  get_itt(tmf_hdr->rtt), get_itt(ctask->itt), cmd->task_id,
		  qedi_conn->iscsi_conn_id);
 
-	if (do_not_recover) {
+	if (qedi_do_not_recover) {
		QEDI_ERR(&qedi->dbg_ctx, "DONT SEND CLEANUP/ABORT %d\n",
-			 do_not_recover);
+			 qedi_do_not_recover);
		goto abort_ret;
	}
 
--- a/drivers/scsi/qedi/qedi_gbl.h
+++ b/drivers/scsi/qedi/qedi_gbl.h
@@ -12,8 +12,14 @@
 
 #include "qedi_iscsi.h"
 
+#ifdef CONFIG_DEBUG_FS
+extern int qedi_do_not_recover;
+#else
+#define qedi_do_not_recover (0)
+#endif
+
 extern uint qedi_io_tracing;
-extern int do_not_recover;
 extern struct scsi_host_template qedi_host_template;
 extern struct iscsi_transport qedi_iscsi_transport;
 extern const struct qed_iscsi_ops *qedi_ops;
--- a/drivers/scsi/qedi/qedi_iscsi.c
+++ b/drivers/scsi/qedi/qedi_iscsi.c
@@ -833,7 +833,7 @@ qedi_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		return ERR_PTR(ret);
	}
 
-	if (do_not_recover) {
+	if (qedi_do_not_recover) {
		ret = -ENOMEM;
		return ERR_PTR(ret);
	}
@@ -957,7 +957,7 @@ static int qedi_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
	struct qedi_endpoint *qedi_ep;
	int ret = 0;
 
-	if (do_not_recover)
+	if (qedi_do_not_recover)
		return 1;
 
	qedi_ep = ep->dd_data;
@@ -1025,7 +1025,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
	}
 
	if (test_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
-		if (do_not_recover) {
+		if (qedi_do_not_recover) {
			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
				  "Do not recover cid=0x%x\n",
				  qedi_ep->iscsi_cid);
@@ -1039,7 +1039,7 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
		}
	}
 
-	if (do_not_recover)
+	if (qedi_do_not_recover)
		goto ep_exit_recover;
 
	switch (qedi_ep->state) {
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -1805,7 +1805,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
	 */
	qedi_ops->common->update_pf_params(qedi->cdev, &qedi->pf_params);
 
-	qedi_setup_int(qedi);
+	rc = qedi_setup_int(qedi);
	if (rc)
		goto stop_iscsi_func;
 
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -2707,13 +2707,9 @@ ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
	    "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
	ql_dbg(level, vha, id,
	    "----- -----------------------------------------------\n");
-	for (cnt = 0; cnt < size; cnt++, buf++) {
-		if (cnt % 16 == 0)
-			ql_dbg(level, vha, id, "%04x:", cnt & ~0xFU);
-		printk(" %02x", *buf);
-		if (cnt % 16 == 15)
-			printk("\n");
+	for (cnt = 0; cnt < size; cnt += 16) {
+		ql_dbg(level, vha, id, "%04x: ", cnt);
+		print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
+			       buf + cnt, min(16U, size - cnt), false);
	}
-	if (cnt % 16 != 0)
-		printk("\n");
 }
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2932,6 +2932,8 @@ EXPORT_SYMBOL(scsi_target_resume);
 /**
  * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
  * @sdev: device to block
+ * @wait: Whether or not to wait until ongoing .queuecommand() /
+ *        .queue_rq() calls have finished.
  *
  * Block request made by scsi lld's to temporarily stop all
  * scsi commands on the specified device. May sleep.
@@ -2949,7 +2951,7 @@ EXPORT_SYMBOL(scsi_target_resume);
  * remove the rport mutex lock and unlock calls from srp_queuecommand().
  */
 int
-scsi_internal_device_block(struct scsi_device *sdev)
+scsi_internal_device_block(struct scsi_device *sdev, bool wait)
 {
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
@@ -2969,12 +2971,16 @@ scsi_internal_device_block(struct scsi_device *sdev)
	 * request queue.
	 */
	if (q->mq_ops) {
-		blk_mq_quiesce_queue(q);
+		if (wait)
+			blk_mq_quiesce_queue(q);
+		else
+			blk_mq_stop_hw_queues(q);
	} else {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_stop_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
-		scsi_wait_for_queuecommand(sdev);
+		if (wait)
+			scsi_wait_for_queuecommand(sdev);
	}
 
	return 0;
@@ -3036,7 +3042,7 @@ EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);
 static void
 device_block(struct scsi_device *sdev, void *data)
 {
-	scsi_internal_device_block(sdev);
+	scsi_internal_device_block(sdev, true);
 }
 
 static int
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -188,8 +188,5 @@ static inline void scsi_dh_remove_device(struct scsi_device *sdev) { }
  */
 
 #define SCSI_DEVICE_BLOCK_MAX_TIMEOUT	600	/* units in seconds */
-extern int scsi_internal_device_block(struct scsi_device *sdev);
-extern int scsi_internal_device_unblock(struct scsi_device *sdev,
-					enum scsi_device_state new_state);
 
 #endif /* _SCSI_PRIV_H */
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1783,6 +1783,8 @@ static int sd_done(struct scsi_cmnd *SCpnt)
 {
	int result = SCpnt->result;
	unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
+	unsigned int sector_size = SCpnt->device->sector_size;
+	unsigned int resid;
	struct scsi_sense_hdr sshdr;
	struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
	struct request *req = SCpnt->request;
@@ -1813,6 +1815,21 @@ static int sd_done(struct scsi_cmnd *SCpnt)
			scsi_set_resid(SCpnt, blk_rq_bytes(req));
		}
		break;
+	default:
+		/*
+		 * In case of bogus fw or device, we could end up having
+		 * an unaligned partial completion. Check this here and force
+		 * alignment.
+		 */
+		resid = scsi_get_resid(SCpnt);
+		if (resid & (sector_size - 1)) {
+			sd_printk(KERN_INFO, sdkp,
+				"Unaligned partial completion (resid=%u, sector_sz=%u)\n",
+				resid, sector_size);
+			resid = min(scsi_bufflen(SCpnt),
+				    round_up(resid, sector_size));
+			scsi_set_resid(SCpnt, resid);
+		}
	}
 
	if (result) {
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -146,7 +146,7 @@ enum attr_idn {
 /* Descriptor idn for Query requests */
 enum desc_idn {
	QUERY_DESC_IDN_DEVICE		= 0x0,
-	QUERY_DESC_IDN_CONFIGURAION	= 0x1,
+	QUERY_DESC_IDN_CONFIGURATION	= 0x1,
	QUERY_DESC_IDN_UNIT		= 0x2,
	QUERY_DESC_IDN_RFU_0		= 0x3,
	QUERY_DESC_IDN_INTERCONNECT	= 0x4,
@@ -162,19 +162,13 @@ enum desc_header_offset {
	QUERY_DESC_DESC_TYPE_OFFSET	= 0x01,
 };
 
-enum ufs_desc_max_size {
-	QUERY_DESC_DEVICE_MAX_SIZE		= 0x40,
-	QUERY_DESC_CONFIGURAION_MAX_SIZE	= 0x90,
-	QUERY_DESC_UNIT_MAX_SIZE		= 0x23,
-	QUERY_DESC_INTERCONNECT_MAX_SIZE	= 0x06,
-	/*
-	 * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
-	 * of descriptor header.
-	 */
-	QUERY_DESC_STRING_MAX_SIZE		= 0xFE,
-	QUERY_DESC_GEOMETRY_MAX_SIZE		= 0x44,
-	QUERY_DESC_POWER_MAX_SIZE		= 0x62,
-	QUERY_DESC_RFU_MAX_SIZE			= 0x00,
-};
+enum ufs_desc_def_size {
+	QUERY_DESC_DEVICE_DEF_SIZE		= 0x40,
+	QUERY_DESC_CONFIGURATION_DEF_SIZE	= 0x90,
+	QUERY_DESC_UNIT_DEF_SIZE		= 0x23,
+	QUERY_DESC_INTERCONNECT_DEF_SIZE	= 0x06,
+	QUERY_DESC_GEOMETRY_DEF_SIZE		= 0x44,
+	QUERY_DESC_POWER_DEF_SIZE		= 0x62,
+};
 
 /* Unit descriptor parameters offsets in bytes*/
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -100,19 +100,6 @@
 #define ufshcd_hex_dump(prefix_str, buf, len) \
	print_hex_dump(KERN_ERR, prefix_str, DUMP_PREFIX_OFFSET, 16, 4, buf, len, false)
 
-static u32 ufs_query_desc_max_size[] = {
-	QUERY_DESC_DEVICE_MAX_SIZE,
-	QUERY_DESC_CONFIGURAION_MAX_SIZE,
-	QUERY_DESC_UNIT_MAX_SIZE,
-	QUERY_DESC_RFU_MAX_SIZE,
-	QUERY_DESC_INTERCONNECT_MAX_SIZE,
-	QUERY_DESC_STRING_MAX_SIZE,
-	QUERY_DESC_RFU_MAX_SIZE,
-	QUERY_DESC_GEOMETRY_MAX_SIZE,
-	QUERY_DESC_POWER_MAX_SIZE,
-	QUERY_DESC_RFU_MAX_SIZE,
-};
-
 enum {
	UFSHCD_MAX_CHANNEL	= 0,
	UFSHCD_MAX_ID		= 1,
@@ -2857,7 +2844,7 @@ static int __ufshcd_query_descriptor(struct ufs_hba *hba,
		goto out;
	}
 
-	if (*buf_len <= QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
+	if (*buf_len < QUERY_DESC_MIN_SIZE || *buf_len > QUERY_DESC_MAX_SIZE) {
		dev_err(hba->dev, "%s: descriptor buffer size (%d) is out of range\n",
			__func__, *buf_len);
		err = -EINVAL;
@@ -2937,6 +2924,92 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
	return err;
 }
 
+/**
+ * ufshcd_read_desc_length - read the specified descriptor length from header
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @desc_length: pointer to variable to read the length of descriptor
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_length(struct ufs_hba *hba,
+	enum desc_idn desc_id,
+	int desc_index,
+	int *desc_length)
+{
+	int ret;
+	u8 header[QUERY_DESC_HDR_SIZE];
+	int header_len = QUERY_DESC_HDR_SIZE;
+
+	if (desc_id >= QUERY_DESC_IDN_MAX)
+		return -EINVAL;
+
+	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
+					desc_id, desc_index, 0, header,
+					&header_len);
+
+	if (ret) {
+		dev_err(hba->dev, "%s: Failed to get descriptor header id %d",
+			__func__, desc_id);
+		return ret;
+	} else if (desc_id != header[QUERY_DESC_DESC_TYPE_OFFSET]) {
+		dev_warn(hba->dev, "%s: descriptor header id %d and desc_id %d mismatch",
+			__func__, header[QUERY_DESC_DESC_TYPE_OFFSET],
+			desc_id);
+		ret = -EINVAL;
+	}
+
+	*desc_length = header[QUERY_DESC_LENGTH_OFFSET];
+	return ret;
+
+}
+
+/**
+ * ufshcd_map_desc_id_to_length - map descriptor IDN to its length
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_len: mapped desc length (out)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba,
+	enum desc_idn desc_id, int *desc_len)
+{
+	switch (desc_id) {
+	case QUERY_DESC_IDN_DEVICE:
+		*desc_len = hba->desc_size.dev_desc;
+		break;
+	case QUERY_DESC_IDN_POWER:
+		*desc_len = hba->desc_size.pwr_desc;
+		break;
+	case QUERY_DESC_IDN_GEOMETRY:
+		*desc_len = hba->desc_size.geom_desc;
+		break;
+	case QUERY_DESC_IDN_CONFIGURATION:
+		*desc_len = hba->desc_size.conf_desc;
+		break;
+	case QUERY_DESC_IDN_UNIT:
+		*desc_len = hba->desc_size.unit_desc;
+		break;
+	case QUERY_DESC_IDN_INTERCONNECT:
+		*desc_len = hba->desc_size.interc_desc;
+		break;
+	case QUERY_DESC_IDN_STRING:
+		*desc_len = QUERY_DESC_MAX_SIZE;
+		break;
+	case QUERY_DESC_IDN_RFU_0:
+	case QUERY_DESC_IDN_RFU_1:
+		*desc_len = 0;
+		break;
+	default:
+		*desc_len = 0;
+		return -EINVAL;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ufshcd_map_desc_id_to_length);
+
 /**
  * ufshcd_read_desc_param - read the specified descriptor parameter
  * @hba: Pointer to adapter instance
@@ -2951,42 +3024,49 @@ static int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
 static int ufshcd_read_desc_param(struct ufs_hba *hba,
				  enum desc_idn desc_id,
				  int desc_index,
-				  u32 param_offset,
+				  u8 param_offset,
				  u8 *param_read_buf,
-				  u32 param_size)
+				  u8 param_size)
 {
	int ret;
	u8 *desc_buf;
-	u32 buff_len;
+	int buff_len;
	bool is_kmalloc = true;
 
-	/* safety checks */
-	if (desc_id >= QUERY_DESC_IDN_MAX)
+	/* Safety check */
+	if (desc_id >= QUERY_DESC_IDN_MAX || !param_size)
		return -EINVAL;
 
-	buff_len = ufs_query_desc_max_size[desc_id];
-	if ((param_offset + param_size) > buff_len)
-		return -EINVAL;
+	/* Get the max length of descriptor from structure filled up at probe
+	 * time.
+	 */
+	ret = ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len);
 
-	if (!param_offset && (param_size == buff_len)) {
-		/* memory space already available to hold full descriptor */
-		desc_buf = param_read_buf;
-		is_kmalloc = false;
-	} else {
-		/* allocate memory to hold full descriptor */
+	/* Sanity checks */
+	if (ret || !buff_len) {
+		dev_err(hba->dev, "%s: Failed to get full descriptor length",
+			__func__);
+		return ret;
+	}
+
+	/* Check whether we need temp memory */
+	if (param_offset != 0 || param_size < buff_len) {
		desc_buf = kmalloc(buff_len, GFP_KERNEL);
		if (!desc_buf)
			return -ENOMEM;
+	} else {
+		desc_buf = param_read_buf;
+		is_kmalloc = false;
	}
 
+	/* Request for full descriptor */
	ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC,
-					    desc_id, desc_index, 0, desc_buf,
-					    &buff_len);
+					    desc_id, desc_index, 0,
+					    desc_buf, &buff_len);
 
	if (ret) {
		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d, desc_index %d, param_offset %d, ret %d",
			__func__, desc_id, desc_index, param_offset, ret);
 
		goto out;
	}
 
@@ -2998,25 +3078,9 @@ static int ufshcd_read_desc_param(struct ufs_hba *hba,
		goto out;
	}
 
-	/*
-	 * While reading variable size descriptors (like string descriptor),
-	 * some UFS devices may report the "LENGTH" (field in "Transaction
-	 * Specific fields" of Query Response UPIU) same as what was requested
-	 * in Query Request UPIU instead of reporting the actual size of the
-	 * variable size descriptor.
-	 * Although it's safe to ignore the "LENGTH" field for variable size
-	 * descriptors as we can always derive the length of the descriptor from
-	 * the descriptor header fields. Hence this change impose the length
-	 * match check only for fixed size descriptors (for which we always
-	 * request the correct size as part of Query Request UPIU).
-	 */
-	if ((desc_id != QUERY_DESC_IDN_STRING) &&
-	    (buff_len != desc_buf[QUERY_DESC_LENGTH_OFFSET])) {
-		dev_err(hba->dev, "%s: desc_buf length mismatch: buff_len %d, buff_len(desc_header) %d",
-			__func__, buff_len, desc_buf[QUERY_DESC_LENGTH_OFFSET]);
-		ret = -EINVAL;
-		goto out;
-	}
+	/* Check wherher we will not copy more data, than available */
+	if (is_kmalloc && param_size > buff_len)
+		param_size = buff_len;
 
	if (is_kmalloc)
		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
@@ -5919,8 +5983,8 @@ static int ufshcd_set_icc_levels_attr(struct ufs_hba *hba, u32 icc_level)
 static void ufshcd_init_icc_levels(struct ufs_hba *hba)
 {
	int ret;
-	int buff_len = QUERY_DESC_POWER_MAX_SIZE;
-	u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+	int buff_len = hba->desc_size.pwr_desc;
+	u8 desc_buf[hba->desc_size.pwr_desc];
 
	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
	if (ret) {
@@ -6017,11 +6081,10 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
 {
	int err;
	u8 model_index;
-	u8 str_desc_buf[QUERY_DESC_STRING_MAX_SIZE + 1] = {0};
-	u8 desc_buf[QUERY_DESC_DEVICE_MAX_SIZE];
+	u8 str_desc_buf[QUERY_DESC_MAX_SIZE + 1] = {0};
+	u8 desc_buf[hba->desc_size.dev_desc];
 
-	err = ufshcd_read_device_desc(hba, desc_buf,
-				      QUERY_DESC_DEVICE_MAX_SIZE);
+	err = ufshcd_read_device_desc(hba, desc_buf, hba->desc_size.dev_desc);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n",
			__func__, err);
@@ -6038,14 +6101,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba,
	model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME];
 
	err = ufshcd_read_string_desc(hba, model_index, str_desc_buf,
-				      QUERY_DESC_STRING_MAX_SIZE, ASCII_STD);
+				      QUERY_DESC_MAX_SIZE, ASCII_STD);
	if (err) {
		dev_err(hba->dev, "%s: Failed reading Product Name. err = %d\n",
			__func__, err);
		goto out;
	}
 
-	str_desc_buf[QUERY_DESC_STRING_MAX_SIZE] = '\0';
+	str_desc_buf[QUERY_DESC_MAX_SIZE] = '\0';
	strlcpy(dev_desc->model, (str_desc_buf + QUERY_DESC_HDR_SIZE),
		min_t(u8, str_desc_buf[QUERY_DESC_LENGTH_OFFSET],
		      MAX_MODEL_LEN));
@@ -6251,6 +6314,51 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
	hba->req_abort_count = 0;
 }
 
+static void ufshcd_init_desc_sizes(struct ufs_hba *hba)
+{
+	int err;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_DEVICE, 0,
+		&hba->desc_size.dev_desc);
+	if (err)
+		hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_POWER, 0,
+		&hba->desc_size.pwr_desc);
+	if (err)
+		hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_INTERCONNECT, 0,
+		&hba->desc_size.interc_desc);
+	if (err)
+		hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_CONFIGURATION, 0,
+		&hba->desc_size.conf_desc);
+	if (err)
+		hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_UNIT, 0,
+		&hba->desc_size.unit_desc);
+	if (err)
+		hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+
+	err = ufshcd_read_desc_length(hba, QUERY_DESC_IDN_GEOMETRY, 0,
+		&hba->desc_size.geom_desc);
+	if (err)
+		hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
+static void ufshcd_def_desc_sizes(struct ufs_hba *hba)
+{
+	hba->desc_size.dev_desc = QUERY_DESC_DEVICE_DEF_SIZE;
+	hba->desc_size.pwr_desc = QUERY_DESC_POWER_DEF_SIZE;
+	hba->desc_size.interc_desc = QUERY_DESC_INTERCONNECT_DEF_SIZE;
+	hba->desc_size.conf_desc = QUERY_DESC_CONFIGURATION_DEF_SIZE;
+	hba->desc_size.unit_desc = QUERY_DESC_UNIT_DEF_SIZE;
+	hba->desc_size.geom_desc = QUERY_DESC_GEOMETRY_DEF_SIZE;
+}
+
 /**
  * ufshcd_probe_hba - probe hba to detect device and initialize
  * @hba: per-adapter instance
@@ -6285,6 +6393,9 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
	if (ret)
		goto out;
 
+	/* Init check for device descriptor sizes */
+	ufshcd_init_desc_sizes(hba);
+
	ret = ufs_get_device_desc(hba, &card);
	if (ret) {
		dev_err(hba->dev, "%s: Failed getting device info. err = %d\n",
@@ -6320,6 +6431,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
 
	/* set the state as operational after switching to desired gear */
	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+
	/*
	 * If we are in error handling context or in power management callbacks
	 * context, no need to scan the host
@@ -7774,6 +7886,9 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
	hba->mmio_base = mmio_base;
	hba->irq = irq;
 
+	/* Set descriptor lengths to specification defaults */
+	ufshcd_def_desc_sizes(hba);
+
	err = ufshcd_hba_init(hba);
	if (err)
		goto out_error;
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -220,6 +220,15 @@ struct ufs_dev_cmd {
	struct ufs_query query;
 };
 
+struct ufs_desc_size {
+	int dev_desc;
+	int pwr_desc;
+	int geom_desc;
+	int interc_desc;
+	int unit_desc;
+	int conf_desc;
+};
+
 /**
  * struct ufs_clk_info - UFS clock related info
  * @list: list headed by hba->clk_list_head
@@ -483,6 +492,7 @@ struct ufs_stats {
  * @clk_list_head: UFS host controller clocks list node head
  * @pwr_info: holds current power mode
  * @max_pwr_info: keeps the device max valid pwm
+ * @desc_size: descriptor sizes reported by device
  * @urgent_bkops_lvl: keeps track of urgent bkops level for device
  * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
  *  device is known or not.
@@ -666,6 +676,7 @@ struct ufs_hba {
	bool is_urgent_bkops_lvl_checked;
 
	struct rw_semaphore clk_scaling_lock;
+	struct ufs_desc_size desc_size;
 };
 
 /* Returns true if clocks can be gated. Otherwise false */
@@ -832,6 +843,10 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
	enum flag_idn idn, bool *flag_res);
 int ufshcd_hold(struct ufs_hba *hba, bool async);
 void ufshcd_release(struct ufs_hba *hba);
+
+int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
+				 int *desc_length);
+
 u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);
 
 /* Wrapper functions for safely calling variant operations */
--- a/drivers/scsi/vmw_pvscsi.c
+++ b/drivers/scsi/vmw_pvscsi.c
@@ -1487,7 +1487,7 @@ static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
		irq_flag &= ~PCI_IRQ_MSI;
 
	error = pci_alloc_irq_vectors(adapter->dev, 1, 1, irq_flag);
-	if (error)
+	if (error < 0)
		goto out_reset_adapter;
 
	adapter->use_req_threshold = pvscsi_setup_req_threshold(adapter, true);
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -196,6 +196,7 @@ struct iscsi_conn {
	struct iscsi_task	*task;		/* xmit task in progress */
 
	/* xmit */
+	spinlock_t		taskqueuelock;  /* protects the next three lists */
	struct list_head	mgmtqueue;	/* mgmt (control) xmit queue */
	struct list_head	cmdqueue;	/* data-path cmd queue */
	struct list_head	requeue;	/* tasks needing another run */
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -472,6 +472,10 @@ static inline int scsi_device_created(struct scsi_device *sdev)
		sdev->sdev_state == SDEV_CREATED_BLOCK;
 }
 
+int scsi_internal_device_block(struct scsi_device *sdev, bool wait);
+int scsi_internal_device_unblock(struct scsi_device *sdev,
+				 enum scsi_device_state new_state);
+
 /* accessor functions for the SCSI parameters */
 static inline int scsi_device_sync(struct scsi_device *sdev)
 {