scsi: qla2xxx: Added change to enable ZIO for FC-NVMe devices

Add driver support for setting the ZIO exchange threshold value based on
the number of outstanding FC-NVMe AENs.

Signed-off-by: Duane Grigsby <duane.grigsby@cavium.com>
Signed-off-by: Darren Trapp <darren.trapp@cavium.com>
Signed-off-by: Anil Gurumurthy <anil.gurumurthy@cavium.com>
Signed-off-by: Himanshu Madhani <himanshu.madhani@cavium.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Author: Duane Grigsby <duane.grigsby@cavium.com>
Date: 2017-07-21 09:32:25 -07:00
Committer: Martin K. Petersen
Parent: 0f7e51f6b3
Commit: deeae7a69f
7 changed files with 99 additions and 23 deletions
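Taken together, the hunks below wire up a simple feedback loop: the NVMe I/O
path counts outstanding FC-NVMe AEN commands, the timer notices when that
count changes, and the DPC thread pushes the new value to the firmware as a
ZIO exchange threshold via mailbox command 0x21 (MBC_GET_SET_ZIO_THRESHOLD).
A condensed sketch of that flow, distilled from the diff rather than quoted
verbatim (error handling, locking, and logging are trimmed):

	/* 1. qla2x00_start_nvme_mq(): count each NVMe admin async-event
	 *    command as an outstanding AEN on the adapter.
	 */
	if (cmd->sqe.common.opcode == nvme_admin_async_event)
		atomic_inc(&vha->hw->nvme_active_aen_cnt);

	/* 2. qla2x00_timer(): on the base port, when the count has changed
	 *    and ZIO mode 6 is active, record it and defer the firmware
	 *    update to the DPC thread (waking the DPC thread is omitted
	 *    here).
	 */
	if (!vha->vp_idx &&
	    atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
	    ha->zio_mode == QLA_ZIO_MODE_6) {
		ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
		set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
	}

	/* 3. qla2x00_do_dpc(): issue the MBC_GET_SET_ZIO_THRESHOLD mailbox
	 *    command with the recorded count as the new threshold.
	 */
	if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags))
		qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen);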

drivers/scsi/qla2xxx/qla_dbg.c

@@ -14,7 +14,7 @@
* | Module Init and Probe | 0x0193 | 0x0146 |
* | | | 0x015b-0x0160 |
* | | | 0x016e |
-* | Mailbox commands | 0x1199 | 0x1193 |
+* | Mailbox commands | 0x1205 | 0x11a2-0x11ff |
* | Device Discovery | 0x2134 | 0x210e-0x2116 |
* | | | 0x211a |
* | | | 0x211c-0x2128 |

drivers/scsi/qla2xxx/qla_def.h

@@ -977,6 +977,7 @@ struct mbx_cmd_32 {
#define MBC_ABORT_TARGET 0x17 /* Abort target (ID). */
#define MBC_RESET 0x18 /* Reset. */
#define MBC_GET_ADAPTER_LOOP_ID 0x20 /* Get loop id of ISP2200. */
+#define MBC_GET_SET_ZIO_THRESHOLD 0x21 /* Get/SET ZIO THRESHOLD. */
#define MBC_GET_RETRY_COUNT 0x22 /* Get f/w retry cnt/delay. */
#define MBC_DISABLE_VI 0x24 /* Disable VI operation. */
#define MBC_ENABLE_VI 0x25 /* Enable VI operation. */
@@ -4017,6 +4018,9 @@ struct qla_hw_data {
struct qlt_hw_data tgt;
int allow_cna_fw_dump;
+atomic_t nvme_active_aen_cnt;
+uint16_t nvme_last_rptd_aen; /* Last recorded aen count */
};
/*
@@ -4089,6 +4093,7 @@ typedef struct scsi_qla_host {
#define FX00_CRITEMP_RECOVERY 25
#define FX00_HOST_INFO_RESEND 26
#define QPAIR_ONLINE_CHECK_NEEDED 27
+#define SET_ZIO_THRESHOLD_NEEDED 28
unsigned long pci_flags;
#define PFLG_DISCONNECTED 0 /* PCI device removed */

drivers/scsi/qla2xxx/qla_gbl.h

@@ -484,6 +484,9 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *, void *, dma_addr_t,
int __qla24xx_parse_gpdb(struct scsi_qla_host *, fc_port_t *,
struct port_database_24xx *);
+extern int qla27xx_get_zio_threshold(scsi_qla_host_t *, uint16_t *);
+extern int qla27xx_set_zio_threshold(scsi_qla_host_t *, uint16_t);
/*
* Global Function Prototypes in qla_isr.c source file.
*/

drivers/scsi/qla2xxx/qla_isr.c

@@ -1823,7 +1823,7 @@ qla24xx_nvme_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, void *tsk)
nvme = &sp->u.iocb_cmd;
if (unlikely(nvme->u.nvme.aen_op))
-atomic_dec(&sp->vha->nvme_active_aen_cnt);
+atomic_dec(&sp->vha->hw->nvme_active_aen_cnt);
/*
* State flags: Bit 6 and 0.

drivers/scsi/qla2xxx/qla_mbx.c

@@ -947,20 +947,12 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
"%s: Firmware supports Exchange Offload 0x%x\n",
__func__, ha->fw_attributes_h);
-/* bit 26 of fw_attributes */
-if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable) {
-struct init_cb_24xx *icb;
-icb = (struct init_cb_24xx *)ha->init_cb;
-/*
- * fw supports nvme and driver load
- * parameter requested nvme
- */
+/*
+ * FW supports nvme and driver load parameter requested nvme.
+ * BIT 26 of fw_attributes indicates NVMe support.
+ */
+if ((ha->fw_attributes_h & 0x400) && ql2xnvmeenable)
vha->flags.nvme_enabled = 1;
-icb->firmware_options_2 &= cpu_to_le32(~0xf);
-ha->zio_mode = 0;
-ha->zio_timer = 0;
-}
}
@@ -6085,3 +6077,56 @@ int qla24xx_gidlist_wait(struct scsi_qla_host *vha,
done:
return rval;
}
+int qla27xx_set_zio_threshold(scsi_qla_host_t *vha, uint16_t value)
+{
+int rval;
+mbx_cmd_t mc;
+mbx_cmd_t *mcp = &mc;
+ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1200,
+"Entered %s\n", __func__);
+memset(mcp->mb, 0, sizeof(mcp->mb));
+mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
+mcp->mb[1] = cpu_to_le16(1);
+mcp->mb[2] = cpu_to_le16(value);
+mcp->out_mb = MBX_2 | MBX_1 | MBX_0;
+mcp->in_mb = MBX_2 | MBX_0;
+mcp->tov = MBX_TOV_SECONDS;
+mcp->flags = 0;
+rval = qla2x00_mailbox_command(vha, mcp);
+ql_dbg(ql_dbg_mbx, vha, 0x1201, "%s %x\n",
+(rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
+return rval;
+}
+
+int qla27xx_get_zio_threshold(scsi_qla_host_t *vha, uint16_t *value)
+{
+int rval;
+mbx_cmd_t mc;
+mbx_cmd_t *mcp = &mc;
+ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1203,
+"Entered %s\n", __func__);
+memset(mcp->mb, 0, sizeof(mcp->mb));
+mcp->mb[0] = MBC_GET_SET_ZIO_THRESHOLD;
+mcp->mb[1] = cpu_to_le16(0);
+mcp->out_mb = MBX_1 | MBX_0;
+mcp->in_mb = MBX_2 | MBX_0;
+mcp->tov = MBX_TOV_SECONDS;
+mcp->flags = 0;
+rval = qla2x00_mailbox_command(vha, mcp);
+if (rval == QLA_SUCCESS)
+*value = mc.mb[2];
+ql_dbg(ql_dbg_mbx, vha, 0x1205, "%s %x\n",
+(rval != QLA_SUCCESS) ? "Failed" : "Done", rval);
+return rval;
+}

drivers/scsi/qla2xxx/qla_nvme.c

@@ -305,6 +305,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
uint16_t avail_dsds;
uint32_t *cur_dsd;
struct req_que *req = NULL;
+struct rsp_que *rsp = NULL;
struct scsi_qla_host *vha = sp->fcport->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_qpair *qpair = sp->qpair;
@@ -313,13 +314,15 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
uint32_t rval = QLA_SUCCESS;
-/* Setup qpair pointers */
-req = qpair->req;
tot_dsds = fd->sg_cnt;
/* Acquire qpair specific lock */
spin_lock_irqsave(&qpair->qp_lock, flags);
+/* Setup qpair pointers */
+req = qpair->req;
+rsp = qpair->rsp;
/* Check for room in outstanding command list. */
handle = req->current_outstanding_cmd;
for (index = 1; index < req->num_outstanding_cmds; index++) {
@@ -354,7 +357,7 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
if (cmd->sqe.common.opcode == nvme_admin_async_event) {
nvme->u.nvme.aen_op = 1;
-atomic_inc(&vha->nvme_active_aen_cnt);
+atomic_inc(&vha->hw->nvme_active_aen_cnt);
}
}
@@ -467,6 +470,11 @@ static int qla2x00_start_nvme_mq(srb_t *sp)
/* Set chip new ring index. */
WRT_REG_DWORD(req->req_q_in, req->ring_index);
+/* Manage unprocessed RIO/ZIO commands in response queue. */
+if (vha->flags.process_response_queue &&
+rsp->ring_ptr->signature != RESPONSE_PROCESSED)
+qla24xx_process_response_queue(vha, rsp);
queuing_error:
spin_unlock_irqrestore(&qpair->qp_lock, flags);
return rval;

drivers/scsi/qla2xxx/qla_os.c

@@ -2751,6 +2751,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
spin_lock_init(&ha->tgt.sess_lock);
spin_lock_init(&ha->tgt.atio_lock);
+atomic_set(&ha->nvme_active_aen_cnt, 0);
/* Clear our data area */
ha->bars = bars;
@@ -5828,6 +5829,17 @@ qla2x00_do_dpc(void *data)
mutex_unlock(&ha->mq_lock);
}
+if (test_and_clear_bit(SET_ZIO_THRESHOLD_NEEDED, &base_vha->dpc_flags)) {
+ql_log(ql_log_info, base_vha, 0xffffff,
+"nvme: SET ZIO Activity exchange threshold to %d.\n",
+ha->nvme_last_rptd_aen);
+if (qla27xx_set_zio_threshold(base_vha, ha->nvme_last_rptd_aen)) {
+ql_log(ql_log_info, base_vha, 0xffffff,
+"nvme: Unable to SET ZIO Activity exchange threshold to %d.\n",
+ha->nvme_last_rptd_aen);
+}
+}
if (!IS_QLAFX00(ha))
qla2x00_do_dpc_all_vps(base_vha);
@@ -6025,12 +6037,15 @@ qla2x00_timer(scsi_qla_host_t *vha)
* FC-NVME
* see if the active AEN count has changed from what was last reported.
*/
-if (atomic_read(&vha->nvme_active_aen_cnt) != vha->nvme_last_rptd_aen) {
-vha->nvme_last_rptd_aen =
-atomic_read(&vha->nvme_active_aen_cnt);
+if (!vha->vp_idx &&
+atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen &&
+ha->zio_mode == QLA_ZIO_MODE_6) {
ql_log(ql_log_info, vha, 0x3002,
-"reporting new aen count of %d to the fw\n",
-vha->nvme_last_rptd_aen);
+"nvme: Sched: Set ZIO exchange threshold to %d.\n",
+ha->nvme_last_rptd_aen);
+ha->nvme_last_rptd_aen = atomic_read(&ha->nvme_active_aen_cnt);
+set_bit(SET_ZIO_THRESHOLD_NEEDED, &vha->dpc_flags);
start_dpc++;
}
/* Schedule the DPC routine if needed */