SCSI misc on 20210702
This series consists of the usual driver updates (ufs, ibmvfc,
megaraid_sas, lpfc, elx, mpi3mr, qedi, iscsi, storvsc, mpt3sas) with
elx and mpi3mr being new drivers. The major core change is a rework to
drop the status byte handling macros and the old bit shifted
definitions, and the rest of the updates are minor fixes.

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----

iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCYN7I6iYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishXpRAQCkngYZ
35yQrqOxgOk2pfrysE95tHrV1MfJm2U49NFTwAEAuZutEvBUTfBF+sbcJ06r6q7i
H0hkJN/Io7enFs5v3WA=
=zwIa
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This series consists of the usual driver updates (ufs, ibmvfc,
  megaraid_sas, lpfc, elx, mpi3mr, qedi, iscsi, storvsc, mpt3sas) with
  elx and mpi3mr being new drivers.

  The major core change is a rework to drop the status byte handling
  macros and the old bit shifted definitions and the rest of the
  updates are minor fixes"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (287 commits)
  scsi: aha1740: Avoid over-read of sense buffer
  scsi: arcmsr: Avoid over-read of sense buffer
  scsi: ips: Avoid over-read of sense buffer
  scsi: ufs: ufs-mediatek: Add missing of_node_put() in ufs_mtk_probe()
  scsi: elx: libefc: Fix IRQ restore in efc_domain_dispatch_frame()
  scsi: elx: libefc: Fix less than zero comparison of a unsigned int
  scsi: elx: efct: Fix pointer error checking in debugfs init
  scsi: elx: efct: Fix is_originator return code type
  scsi: elx: efct: Fix link error for _bad_cmpxchg
  scsi: elx: efct: Eliminate unnecessary boolean check in efct_hw_command_cancel()
  scsi: elx: efct: Do not use id uninitialized in efct_lio_setup_session()
  scsi: elx: efct: Fix error handling in efct_hw_init()
  scsi: elx: efct: Remove redundant initialization of variable lun
  scsi: elx: efct: Fix spelling mistake "Unexected" -> "Unexpected"
  scsi: lpfc: Fix build error in lpfc_scsi.c
  scsi: target: iscsi: Remove redundant continue statement
  scsi: qla4xxx: Remove redundant continue statement
  scsi: ppa: Switch to use module_parport_driver()
  scsi: imm: Switch to use module_parport_driver()
  scsi: mpt3sas: Fix error return value in _scsih_expander_add()
  ...
commit bd31b9efbf
@@ -995,6 +995,132 @@ Description:	This entry shows the target state of an UFS UIC link
 		The file is read only.
 
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/monitor_enable
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows the status of performance monitor enablement
+		and it can be used to start/stop the monitor. When the monitor
+		is stopped, the performance data collected is also cleared.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/monitor_chunk_size
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file tells the monitor to focus on requests transferring
+		data of specific chunk size (in Bytes). 0 means any chunk size.
+		It can only be changed when monitor is disabled.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_total_sectors
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows how many sectors (in 512 Bytes) have been
+		sent from device to host after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_total_busy
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows how long (in micro seconds) has been spent
+		sending data from device to host after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_nr_requests
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows how many read requests have been sent after
+		monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_max
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows the maximum latency (in micro seconds) of
+		read requests after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_min
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows the minimum latency (in micro seconds) of
+		read requests after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_avg
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows the average latency (in micro seconds) of
+		read requests after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/read_req_latency_sum
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows the total latency (in micro seconds) of
+		read requests sent after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_total_sectors
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows how many sectors (in 512 Bytes) have been sent
+		from host to device after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_total_busy
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows how long (in micro seconds) has been spent
+		sending data from host to device after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_nr_requests
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows how many write requests have been sent after
+		monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_max
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows the maximum latency (in micro seconds) of write
+		requests after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_min
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows the minimum latency (in micro seconds) of write
+		requests after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_avg
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows the average latency (in micro seconds) of write
+		requests after monitor gets started.
+
+		The file is read only.
+
+What:		/sys/bus/platform/drivers/ufshcd/*/monitor/write_req_latency_sum
+Date:		January 2021
+Contact:	Can Guo <cang@codeaurora.org>
+Description:	This file shows the total latency (in micro seconds) of write
+		requests after monitor gets started.
+
+		The file is read only.
+
 What:		/sys/bus/platform/drivers/ufshcd/*/device_descriptor/wb_presv_us_en
 Date:		June 2020
 Contact:	Asutosh Das <asutoshd@codeaurora.org>
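For illustration, a minimal user-space sketch of reading one of the monitor attributes documented above; the device name is a placeholder, since the wildcard in the documented path stands for a board-specific UFS host controller instance:

/* Hypothetical example: read a UFS performance monitor attribute.
 * "1d84000.ufshc" is an assumed device name; substitute the UFS host
 * controller instance present on the target board. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/bus/platform/drivers/ufshcd/"
			   "1d84000.ufshc/monitor/read_req_latency_avg";
	char buf[64];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("average read latency (us): %s", buf);
	fclose(f);
	return 0;
}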
@@ -1172,10 +1172,9 @@ Members of interest:
       of 0 implies a successfully completed command (and all
       data (if any) has been transferred to or from the SCSI
       target device). 'result' is a 32 bit unsigned integer that
-      can be viewed as 4 related bytes. The SCSI status value is
-      in the LSB. See include/scsi/scsi.h status_byte(),
-      msg_byte(), host_byte() and driver_byte() macros and
-      related constants.
+      can be viewed as 2 related bytes. The SCSI status value is
+      in the LSB. See include/scsi/scsi.h status_byte() and
+      host_byte() macros and related constants.
    sense_buffer
       - an array (maximum size: SCSI_SENSE_BUFFERSIZE bytes) that
         should be written when the SCSI status (LSB of 'result')
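A minimal sketch of the layout the updated text describes, using demo helpers rather than the kernel's own accessors (host_byte() and status_byte() in include/scsi/scsi.h): the SCSI status occupies the least significant byte of 'result' and the host byte occupies bits 16-23.

/* Demo-only helpers mirroring the documented layout of 'result'. */
#include <stdio.h>

#define DEMO_DID_OK			0x00	/* stand-in for DID_OK */
#define DEMO_SAM_STAT_CHECK_CONDITION	0x02	/* SAM-defined status */

static unsigned int demo_status_byte(unsigned int result)
{
	return result & 0xff;		/* SCSI status in the LSB */
}

static unsigned int demo_host_byte(unsigned int result)
{
	return (result >> 16) & 0xff;	/* host (transport) byte */
}

int main(void)
{
	unsigned int result =
		(DEMO_DID_OK << 16) | DEMO_SAM_STAT_CHECK_CONDITION;

	printf("host=0x%02x status=0x%02x\n",
	       demo_host_byte(result), demo_status_byte(result));
	return 0;
}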
@@ -6800,6 +6800,15 @@ S:	Supported
 W:	http://www.broadcom.com
 F:	drivers/scsi/lpfc/
 
+EMULEX/BROADCOM EFCT FC/FCOE SCSI TARGET DRIVER
+M:	James Smart <james.smart@broadcom.com>
+M:	Ram Vegesna <ram.vegesna@broadcom.com>
+L:	linux-scsi@vger.kernel.org
+L:	target-devel@vger.kernel.org
+S:	Supported
+W:	http://www.broadcom.com
+F:	drivers/scsi/elx/
+
 ENE CB710 FLASH CARD READER DRIVER
 M:	Michał Mirosław <mirq-linux@rere.qmqm.pl>
 S:	Maintained
@@ -151,6 +151,15 @@ config BLK_CGROUP_IOLATENCY
 
 	  Note, this is an experimental interface and could be changed someday.
 
+config BLK_CGROUP_FC_APPID
+	bool "Enable support to track FC I/O Traffic across cgroup applications"
+	depends on BLK_CGROUP=y
+	help
+	  Enabling this option enables the support to track FC I/O traffic across
+	  cgroup applications. It enables the Fabric and the storage targets to
+	  identify, monitor, and handle FC traffic based on VM tags by inserting
+	  application specific identification into the FC frame.
+
 config BLK_CGROUP_IOCOST
 	bool "Enable support for cost model based cgroup IO controller"
 	depends on BLK_CGROUP=y
@@ -84,7 +84,7 @@ static int bsg_transport_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
 	 */
 	hdr->device_status = job->result & 0xff;
 	hdr->transport_status = host_byte(job->result);
-	hdr->driver_status = driver_byte(job->result);
+	hdr->driver_status = 0;
 	hdr->info = 0;
 	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
 		hdr->info |= SG_INFO_CHECK;
@@ -96,7 +96,9 @@ static int bsg_scsi_complete_rq(struct request *rq, struct sg_io_v4 *hdr)
 	 */
 	hdr->device_status = sreq->result & 0xff;
 	hdr->transport_status = host_byte(sreq->result);
-	hdr->driver_status = driver_byte(sreq->result);
+	hdr->driver_status = 0;
+	if (scsi_status_is_check_condition(sreq->result))
+		hdr->driver_status = DRIVER_SENSE;
 	hdr->info = 0;
 	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
 		hdr->info |= SG_INFO_CHECK;
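scsi_status_is_check_condition() replaces the open-coded status comparisons that the dropped macros used to encourage. A hedged sketch of the test it performs (the in-tree helper lives in include/scsi/scsi_status.h; the name below is a demo, not the kernel's):

/* Sketch of the check-condition test used above. CHECK CONDITION is
 * SAM status 0x02; bit 0 of the status byte is reserved, hence the
 * 0xfe mask. Negative results indicate errno-style errors, not SCSI
 * status, so they are rejected first. */
#include <stdbool.h>

#define DEMO_SAM_STAT_CHECK_CONDITION 0x02

static bool demo_status_is_check_condition(int status)
{
	if (status < 0)
		return false;
	return (status & 0xfe) == DEMO_SAM_STAT_CHECK_CONDITION;
}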
@@ -254,9 +254,11 @@ static int blk_complete_sghdr_rq(struct request *rq, struct sg_io_hdr *hdr,
 	 */
 	hdr->status = req->result & 0xff;
 	hdr->masked_status = status_byte(req->result);
-	hdr->msg_status = msg_byte(req->result);
+	hdr->msg_status = COMMAND_COMPLETE;
 	hdr->host_status = host_byte(req->result);
-	hdr->driver_status = driver_byte(req->result);
+	hdr->driver_status = 0;
+	if (scsi_status_is_check_condition(hdr->status))
+		hdr->driver_status = DRIVER_SENSE;
 	hdr->info = 0;
 	if (hdr->masked_status || hdr->host_status || hdr->driver_status)
 		hdr->info |= SG_INFO_CHECK;

@@ -484,9 +486,10 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
 		break;
 	}
 
-	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO)) {
-		err = DRIVER_ERROR << 24;
-		goto error;
+	if (bytes) {
+		err = blk_rq_map_kern(q, rq, buffer, bytes, GFP_NOIO);
+		if (err)
+			goto error;
 	}
 
 	blk_execute_rq(disk, rq, 0);
@@ -196,9 +196,7 @@ void ata_scsi_set_sense(struct ata_device *dev, struct scsi_cmnd *cmd,
 	if (!cmd)
 		return;
 
-	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
-	scsi_build_sense_buffer(d_sense, cmd->sense_buffer, sk, asc, ascq);
+	scsi_build_sense(cmd, d_sense, sk, asc, ascq);
 }
 
 void ata_scsi_set_sense_information(struct ata_device *dev,

@@ -409,13 +407,16 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
 	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
 				  sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
 
-	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+	if (cmd_result < 0) {
+		rc = cmd_result;
+		goto error;
+	}
+	if (scsi_sense_valid(&sshdr)) {/* sense data available */
 		u8 *desc = sensebuf + 8;
-		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
 
 		/* If we set cc then ATA pass-through will cause a
 		 * check condition even if no error. Filter that. */
-		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
+		if (scsi_status_is_check_condition(cmd_result)) {
 			if (sshdr.sense_key == RECOVERED_ERROR &&
 			    sshdr.asc == 0 && sshdr.ascq == 0x1d)
 				cmd_result &= ~SAM_STAT_CHECK_CONDITION;

@@ -490,9 +491,12 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
 	cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
 				sensebuf, &sshdr, (10*HZ), 5, 0, 0, NULL);
 
-	if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */
+	if (cmd_result < 0) {
+		rc = cmd_result;
+		goto error;
+	}
+	if (scsi_sense_valid(&sshdr)) {/* sense data available */
 		u8 *desc = sensebuf + 8;
-		cmd_result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
 
 		/* If we set cc then ATA pass-through will cause a
 		 * check condition even if no error. Filter that. */

@@ -638,7 +642,7 @@ static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
 		if (cmd->request->rq_flags & RQF_QUIET)
 			qc->flags |= ATA_QCFLAG_QUIET;
 	} else {
-		cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
+		cmd->result = (DID_OK << 16) | SAM_STAT_TASK_SET_FULL;
 		cmd->scsi_done(cmd);
 	}

@@ -858,8 +862,6 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
-	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
 	/*
 	 * Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.

@@ -874,8 +876,7 @@ static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
 		 * ATA PASS-THROUGH INFORMATION AVAILABLE
 		 * Always in descriptor format sense.
 		 */
-		scsi_build_sense_buffer(1, cmd->sense_buffer,
-					RECOVERED_ERROR, 0, 0x1D);
+		scsi_build_sense(cmd, 1, RECOVERED_ERROR, 0, 0x1D);
 	}
 
 	if ((cmd->sense_buffer[0] & 0x7f) >= 0x72) {

@@ -957,8 +958,6 @@ static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
 
 	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);
 
-	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
-
 	if (ata_dev_disabled(dev)) {
 		/* Device disabled after error recovery */
 		/* LOGICAL UNIT NOT READY, HARD RESET REQUIRED */

@@ -4196,7 +4195,6 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
 
 	case REQUEST_SENSE:
 		ata_scsi_set_sense(dev, cmd, 0, 0, 0);
-		cmd->result = (DRIVER_SENSE << 24);
 		break;
 
 	/* if we reach this, then writeback caching is disabled,
@@ -506,6 +506,7 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
 	iser_conn->iscsi_conn = conn;
 
 out:
+	iscsi_put_endpoint(ep);
 	mutex_unlock(&iser_conn->state_mutex);
 	return error;
 }

@@ -1002,6 +1003,7 @@ static struct iscsi_transport iscsi_iser_transport = {
 	/* connection management */
 	.create_conn = iscsi_iser_conn_create,
 	.bind_conn = iscsi_iser_conn_bind,
+	.unbind_conn = iscsi_conn_unbind,
 	.destroy_conn = iscsi_conn_teardown,
 	.attr_is_visible = iser_attr_is_visible,
 	.set_param = iscsi_iser_set_param,
@@ -2209,7 +2209,7 @@ static int srp_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd)
 		 * to reduce queue depth temporarily.
 		 */
 		scmnd->result = len == -ENOMEM ?
-			DID_OK << 16 | QUEUE_FULL << 1 : DID_ERROR << 16;
+			DID_OK << 16 | SAM_STAT_TASK_SET_FULL : DID_ERROR << 16;
 		goto err_iu;
 	}
@@ -6993,8 +6993,6 @@ mpt_SoftResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 	ioc->ioc_reset_in_progress = 1;
 	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
-	rc = -1;
-
 	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
 		if (MptResetHandlers[cb_idx])
 			mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
@@ -86,7 +86,7 @@ MODULE_PARM_DESC(mpt_pt_clear,
 		" Clear persistency table: enable=1  "
 		"(default=MPTSCSIH_PT_CLEAR=0)");
 
-/* scsi-mid layer global parmeter is max_report_luns, which is 511 */
+/* scsi-mid layer global parameter is max_report_luns, which is 511 */
 #define MPTSAS_MAX_LUN (16895)
 static int max_lun = MPTSAS_MAX_LUN;
 module_param(max_lun, int, 0);

@@ -420,12 +420,14 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
 }
 
 /**
- *	mptsas_find_portinfo_by_sas_address -
+ *	mptsas_find_portinfo_by_sas_address - find and return portinfo for
+ *	this sas_address
 *	@ioc: Pointer to MPT_ADAPTER structure
- *	@handle:
+ *	@sas_address: expander sas address
 *
- *	This function should be called with the sas_topology_mutex already held
+ *	This function should be called with the sas_topology_mutex already held.
+ *
+ *	Return: %NULL if not found.
 **/
 static struct mptsas_portinfo *
 mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)

@@ -567,12 +569,14 @@ starget)
 }
 
 /**
- *	mptsas_add_device_component -
+ *	mptsas_add_device_component - adds a new device component to our lists
 *	@ioc: Pointer to MPT_ADAPTER structure
- *	@channel: fw mapped id's
- *	@id:
- *	@sas_address:
- *	@device_info:
+ *	@channel: channel number
+ *	@id: Logical Target ID for reset (if appropriate)
+ *	@sas_address: expander sas address
+ *	@device_info: specific bits (flags) for devices
+ *	@slot: enclosure slot ID
+ *	@enclosure_logical_id: enclosure WWN
 *
 **/
 static void

@@ -634,10 +638,10 @@ mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
 }
 
 /**
- *	mptsas_add_device_component_by_fw -
+ *	mptsas_add_device_component_by_fw - adds a new device component by FW ID
 *	@ioc: Pointer to MPT_ADAPTER structure
- *	@channel: fw mapped id's
- *	@id:
+ *	@channel: channel number
+ *	@id: Logical Target ID
 *
 **/
 static void

@@ -668,8 +672,7 @@ mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
 /**
 *	mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
 *	@ioc: Pointer to MPT_ADAPTER structure
- *	@channel: fw mapped id's
- *	@id:
+ *	@starget: SCSI target for this SCSI device
 *
 **/
 static void

@@ -771,9 +774,9 @@ mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
 }
 
 /**
- *	mptsas_add_device_component_starget -
+ *	mptsas_add_device_component_starget - adds a SCSI target device component
 *	@ioc: Pointer to MPT_ADAPTER structure
- *	@starget:
+ *	@starget: SCSI target for this SCSI device
 *
 **/
 static void

@@ -806,7 +809,7 @@ mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
 *	mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
 *	@ioc: Pointer to MPT_ADAPTER structure
 *	@channel: os mapped id's
- *	@id:
+ *	@id: Logical Target ID
 *
 **/
 static void

@@ -978,11 +981,12 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
 }
 
 /**
- * csmisas_find_vtarget
+ * mptsas_find_vtarget - find a virtual target device (FC LUN device or
+ * SCSI target device)
 *
- * @ioc
- * @volume_id
- * @volume_bus
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @channel: channel number
+ * @id: Logical Target ID
 *
 **/
 static VirtTarget *

@@ -1047,15 +1051,14 @@ mptsas_queue_rescan(MPT_ADAPTER *ioc)
 
 
 /**
- *	mptsas_target_reset
+ *	mptsas_target_reset - Issues TARGET_RESET to end device using
+ *	handshaking method
 *
- *	Issues TARGET_RESET to end device using handshaking method
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@channel: channel number
+ *	@id: Logical Target ID for reset
 *
- *	@ioc
- *	@channel
- *	@id
- *
- *	Returns (1) success
+ *	Return: (1) success
 *	        (0) failure
 *
 **/

@@ -1119,15 +1122,15 @@ mptsas_block_io_starget(struct scsi_target *starget)
 }
 
 /**
- *	mptsas_target_reset_queue
+ *	mptsas_target_reset_queue - queue a target reset
 *
- *	Receive request for TARGET_RESET after receiving an firmware
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@sas_event_data: SAS Device Status Change Event data
+ *
+ *	Receive request for TARGET_RESET after receiving a firmware
 *	event NOT_RESPONDING_EVENT, then put command in link list
 *	and queue if task_queue already in use.
 *
- *	@ioc
- *	@sas_event_data
- *
 **/
 static void
 mptsas_target_reset_queue(MPT_ADAPTER *ioc,

@@ -1207,9 +1210,11 @@ mptsas_schedule_target_reset(void *iocp)
 /**
 *	mptsas_taskmgmt_complete - complete SAS task management function
 *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@mf: MPT message frame
+ *	@mr: SCSI Task Management Reply structure ptr (may be %NULL)
 *
 *	Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
- *	queue to finish off removing device from upper layers. then send next
+ *	queue to finish off removing device from upper layers, then send next
 *	TARGET_RESET in the queue.
 **/
 static int

@@ -1300,10 +1305,10 @@ mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 }
 
 /**
- *	mptscsih_ioc_reset
+ *	mptsas_ioc_reset - issue an IOC reset for this reset phase
 *
- *	@ioc
- *	@reset_phase
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@reset_phase: id of phase of reset
 *
 **/
 static int

@@ -1350,7 +1355,7 @@ mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 
 
 /**
- * enum device_state -
+ * enum device_state - TUR device state
 * @DEVICE_RETRY: need to retry the TUR
 * @DEVICE_ERROR: TUR return error, don't add device
 * @DEVICE_READY: device can be added

@@ -1941,7 +1946,7 @@ mptsas_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *SCpnt)
 }
 
 /**
- *	mptsas_mptsas_eh_timed_out - resets the scsi_cmnd timeout
+ *	mptsas_eh_timed_out - resets the scsi_cmnd timeout
 *		if the device under question is currently in the
 *		device removal delay.
 *	@sc: scsi command that the midlayer is about to time out

@@ -2839,14 +2844,15 @@ struct rep_manu_reply{
 };
 
 /**
- * mptsas_exp_repmanufacture_info -
+ * mptsas_exp_repmanufacture_info - sets expander manufacturer info
 * @ioc: per adapter object
 * @sas_address: expander sas address
 * @edev: the sas_expander_device object
 *
- * Fills in the sas_expander_device object when SMP port is created.
+ * For an edge expander or a fanout expander:
+ * fills in the sas_expander_device object when SMP port is created.
 *
- * Returns 0 for success, non-zero for failure.
+ * Return: 0 for success, non-zero for failure.
 */
 static int
 mptsas_exp_repmanufacture_info(MPT_ADAPTER *ioc,

@@ -3284,7 +3290,7 @@ static int mptsas_probe_one_phy(struct device *dev,
 			rphy_to_expander_device(rphy));
 	}
 
-	/* If the device exists,verify it wasn't previously flagged
+	/* If the device exists, verify it wasn't previously flagged
 	   as a missing device.  If so, clear it */
 	vtarget = mptsas_find_vtarget(ioc,
 	    phy_info->attached.channel,

@@ -3611,8 +3617,7 @@ static void mptsas_expander_delete(MPT_ADAPTER *ioc,
 
 /**
 * mptsas_send_expander_event - expanders events
- * @ioc: Pointer to MPT_ADAPTER structure
- * @expander_data: event data
+ * @fw_event: event data
 *
 *
 * This function handles adding, removing, and refreshing

@@ -3657,9 +3662,9 @@ mptsas_send_expander_event(struct fw_event_work *fw_event)
 
 
 /**
- * mptsas_expander_add -
+ * mptsas_expander_add - adds a newly discovered expander
 * @ioc: Pointer to MPT_ADAPTER structure
- * @handle:
+ * @handle: device handle
 *
 */
 static struct mptsas_portinfo *

@@ -4000,9 +4005,9 @@ mptsas_probe_devices(MPT_ADAPTER *ioc)
 }
 
 /**
- * mptsas_scan_sas_topology -
+ * mptsas_scan_sas_topology - scans new SAS topology
+ * (part of probe or rescan)
 * @ioc: Pointer to MPT_ADAPTER structure
- * @sas_address:
 *
 **/
 static void

@@ -4150,11 +4155,12 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
 }
 
 /**
- * mptsas_find_phyinfo_by_phys_disk_num -
+ * mptsas_find_phyinfo_by_phys_disk_num - find phyinfo for the
+ * specified @phys_disk_num
 * @ioc: Pointer to MPT_ADAPTER structure
- * @phys_disk_num:
- * @channel:
- * @id:
+ * @phys_disk_num: (hot plug) physical disk number (for RAID support)
+ * @channel: channel number
+ * @id: Logical Target ID
 *
 **/
 static struct mptsas_phyinfo *

@@ -4773,8 +4779,9 @@ mptsas_send_raid_event(struct fw_event_work *fw_event)
 *	@lun: Logical unit for reset (if appropriate)
 *	@task_context: Context for the task to be aborted
 *	@timeout: timeout for task management control
+ *	@issue_reset: set to 1 on return if reset is needed, else 0
 *
- *	return 0 on success and -1 on failure:
+ *	Return: 0 on success or -1 on failure.
 *
 */
 static int

@@ -4847,9 +4854,9 @@ mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
 
 /**
 *	mptsas_broadcast_primitive_work - Handle broadcast primitives
- *	@work: work queue payload containing info describing the event
+ *	@fw_event: work queue payload containing info describing the event
 *
- *	this will be handled in workqueue context.
+ *	This will be handled in workqueue context.
 */
 static void
 mptsas_broadcast_primitive_work(struct fw_event_work *fw_event)
@@ -9,7 +9,7 @@
 #include <uapi/scsi/fc/fc_els.h>
 #include <linux/delay.h>
 #include <linux/overflow.h>
-
+#include <linux/blk-cgroup.h>
 #include "nvme.h"
 #include "fabrics.h"
 #include <linux/nvme-fc-driver.h>

@@ -3808,10 +3808,80 @@ static ssize_t nvme_fc_nvme_discovery_store(struct device *dev,
 
 	return count;
 }
+
+/* Parse the cgroup id from a buf and return the length of cgrpid */
+static int fc_parse_cgrpid(const char *buf, u64 *id)
+{
+	char cgrp_id[16+1];
+	int cgrpid_len, j;
+
+	memset(cgrp_id, 0x0, sizeof(cgrp_id));
+	for (cgrpid_len = 0, j = 0; cgrpid_len < 17; cgrpid_len++) {
+		if (buf[cgrpid_len] != ':')
+			cgrp_id[cgrpid_len] = buf[cgrpid_len];
+		else {
+			j = 1;
+			break;
+		}
+	}
+	if (!j)
+		return -EINVAL;
+	if (kstrtou64(cgrp_id, 16, id) < 0)
+		return -EINVAL;
+	return cgrpid_len;
+}
+
+/*
+ * fc_update_appid: Parse and update the appid in the blkcg associated with
+ * cgroupid.
+ * @buf: buf contains both cgrpid and appid info
+ * @count: size of the buffer
+ */
+static int fc_update_appid(const char *buf, size_t count)
+{
+	u64 cgrp_id;
+	int appid_len = 0;
+	int cgrpid_len = 0;
+	char app_id[FC_APPID_LEN];
+	int ret = 0;
+
+	if (buf[count-1] == '\n')
+		count--;
+
+	if ((count > (16+1+FC_APPID_LEN)) || (!strchr(buf, ':')))
+		return -EINVAL;
+
+	cgrpid_len = fc_parse_cgrpid(buf, &cgrp_id);
+	if (cgrpid_len < 0)
+		return -EINVAL;
+	appid_len = count - cgrpid_len - 1;
+	if (appid_len > FC_APPID_LEN)
+		return -EINVAL;
+
+	memset(app_id, 0x0, sizeof(app_id));
+	memcpy(app_id, &buf[cgrpid_len+1], appid_len);
+	ret = blkcg_set_fc_appid(app_id, cgrp_id, sizeof(app_id));
+	if (ret < 0)
+		return ret;
+	return count;
+}
+
+static ssize_t fc_appid_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int ret = 0;
+
+	ret = fc_update_appid(buf, count);
+	if (ret < 0)
+		return -EINVAL;
+	return count;
+}
 static DEVICE_ATTR(nvme_discovery, 0200, NULL, nvme_fc_nvme_discovery_store);
+static DEVICE_ATTR(appid_store, 0200, NULL, fc_appid_store);
 
 static struct attribute *nvme_fc_attrs[] = {
 	&dev_attr_nvme_discovery.attr,
+	&dev_attr_appid_store.attr,
 	NULL
 };
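The input fc_update_appid() accepts is "<cgroupid-in-hex>:<appid>". A hypothetical user-space sketch of driving the new attribute; the sysfs path and the cgroup id/appid values below are assumptions for illustration only:

/* Hypothetical usage sketch for the appid_store attribute added above.
 * The attribute path and values are placeholders; the kernel parses
 * the payload as "<cgroupid-in-hex>:<appid>". */
#include <stdio.h>

int main(void)
{
	const char *attr =
		"/sys/class/fc/fc_udev_device/appid_store"; /* assumed path */
	FILE *f = fopen(attr, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* cgroup id 0x4ef, application id "app1" (both made up) */
	fprintf(f, "%s:%s\n", "4ef", "app1");
	fclose(f);
	return 0;
}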
@@ -856,10 +856,7 @@ void zfcp_scsi_set_prot(struct zfcp_adapter *adapter)
  */
 void zfcp_scsi_dif_sense_error(struct scsi_cmnd *scmd, int ascq)
 {
-	scsi_build_sense_buffer(1, scmd->sense_buffer,
-				ILLEGAL_REQUEST, 0x10, ascq);
-	set_driver_byte(scmd, DRIVER_SENSE);
-	scmd->result |= SAM_STAT_CHECK_CONDITION;
+	scsi_build_sense(scmd, 1, ILLEGAL_REQUEST, 0x10, ascq);
 	set_host_byte(scmd, DID_SOFT_ERROR);
 }
@@ -303,10 +303,10 @@ static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
 
 	/* Initialize sglist */
 	memset(&sglist, 0, sizeof(TW_SG_Entry));
-	sglist[0].length = TW_SECTOR_SIZE;
-	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 
-	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
+	if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
 		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
 		goto out;
 	}

@@ -440,8 +440,8 @@ static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
 
 	/* Initialize sglist */
 	memset(&sglist, 0, sizeof(TW_SG_Entry));
-	sglist[0].length = TW_SECTOR_SIZE;
-	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
+	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
+	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 
 	/* Mark internal command */
 	tw_dev->srb[request_id] = NULL;

@@ -501,9 +501,8 @@ static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 	   Sunday 12:00AM */
 	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
 	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
-	schedulertime = cpu_to_le32(schedulertime % 604800);
 
-	memcpy(param->data, &schedulertime, sizeof(u32));
+	memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));
 
 	/* Mark internal command */
 	tw_dev->srb[request_id] = NULL;

@@ -676,7 +675,9 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
 	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
 
 	/* Now allocate ioctl buf memory */
-	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
+	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
+			sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
+			&dma_handle, GFP_KERNEL);
 	if (!cpu_addr) {
 		retval = TW_IOCTL_ERROR_OS_ENOMEM;
 		goto out2;

@@ -685,7 +686,7 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
 	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
 
 	/* Now copy down the entire ioctl */
-	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
+	if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
 		goto out3;
 
 	/* See which ioctl we are doing */

@@ -867,11 +868,13 @@ static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long
 	}
 
 	/* Now copy the entire response to userspace */
-	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
+	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
 		retval = 0;
 out3:
 	/* Now free ioctl buf memory */
-	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
+	dma_free_coherent(&tw_dev->tw_pci_dev->dev,
+			sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
+			cpu_addr, dma_handle);
 out2:
 	mutex_unlock(&tw_dev->ioctl_lock);
 out:

@@ -1000,19 +1003,13 @@ static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_
 	if (print_host)
 		printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
 		       tw_dev->host->host_no,
-		       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
-		       full_command_packet->header.status_block.error,
-		       error_str[0] == '\0' ?
-		       twa_string_lookup(twa_error_table,
-					 full_command_packet->header.status_block.error) : error_str,
+		       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
+		       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
 		       full_command_packet->header.err_specific_desc);
 	else
 		printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
-		       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
-		       full_command_packet->header.status_block.error,
-		       error_str[0] == '\0' ?
-		       twa_string_lookup(twa_error_table,
-					 full_command_packet->header.status_block.error) : error_str,
+		       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
+		       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
 		       full_command_packet->header.err_specific_desc);
 }

@@ -1129,12 +1126,11 @@ static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
 	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
 	tw_initconnect->request_id = request_id;
 	tw_initconnect->message_credits = cpu_to_le16(message_credits);
-	tw_initconnect->features = set_features;
 
 	/* Turn on 64-bit sgl support if we need to */
-	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
+	set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
 
-	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
+	tw_initconnect->features = cpu_to_le32(set_features);
 
 	if (set_features & TW_EXTENDED_INIT_CONNECT) {
 		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;

@@ -1342,13 +1338,15 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
 				/* If error, command failed */
 				if (error == 1) {
 					/* Ask for a host reset */
-					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+					cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
 				}
 
 				/* Report residual bytes for single sgl */
 				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
-					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
-						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
+					u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);
+
+					if (length < scsi_bufflen(cmd))
+						scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
 				}
 
 				/* Now complete the io */

@@ -1390,13 +1388,13 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
 	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
 		newcommand = &full_command_packet->command.newcommand;
 		newcommand->request_id__lunl =
-			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
+			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
 		if (length) {
-			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
 			newcommand->sg_list[0].length = cpu_to_le32(length);
 		}
 		newcommand->sgl_entries__lunh =
-			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
+			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0);
 	} else {
 		oldcommand = &full_command_packet->command.oldcommand;
 		oldcommand->request_id = request_id;

@@ -1407,7 +1405,7 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
 			sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
 		else
 			sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
-		sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
+		sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
 		sgl->length = cpu_to_le32(length);
 
 		oldcommand->size += pae;

@@ -1831,10 +1829,10 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 	if (srb) {
 		command_packet->unit = srb->device->id;
 		command_packet->request_id__lunl =
-			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
+			TW_REQ_LUN_IN(srb->device->lun, request_id);
 	} else {
 		command_packet->request_id__lunl =
-			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
+			TW_REQ_LUN_IN(0, request_id);
 		command_packet->unit = 0;
 	}

@@ -1866,19 +1864,19 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 				}
 			}
 		}
-		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
+		command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
 		}
 	} else {
 		/* Internal cdb post */
 		for (i = 0; i < use_sg; i++) {
-			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
-			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
+			command_packet->sg_list[i].address = sglistarg[i].address;
+			command_packet->sg_list[i].length = sglistarg[i].length;
 			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
 				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
 				goto out;
 			}
 		}
-		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
+		command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
 	}
 
 	if (srb) {

@@ -2103,7 +2101,7 @@ static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
 	       TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
 	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
 	       TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
-	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
+	       le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
 	       TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
 
 	/* Try to enable MSI */
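The common thread of these 3w-9xxx hunks is that fields shared with the controller are now typed as fixed-endian (__le16/__le32), so byte swapping happens exactly once, at the point a CPU value enters or leaves the structure, and sparse can type-check every access. A reduced sketch of the pattern with a demo struct (not the driver's own types):

/* Kernel-style demo of the fixed-endian field pattern used above: the
 * field is little-endian on the wire, so it is filled with
 * cpu_to_le32() and read back with le32_to_cpu(). */
#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_sg_entry {
	__le32 address;
	__le32 length;
};

static void demo_fill(struct demo_sg_entry *sg, u32 addr, u32 len)
{
	sg->address = cpu_to_le32(addr);	/* swap exactly once */
	sg->length = cpu_to_le32(len);
}

static u32 demo_length(const struct demo_sg_entry *sg)
{
	return le32_to_cpu(sg->length);		/* swap back on read */
}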
@@ -50,7 +50,7 @@
 /* AEN string type */
 typedef struct TAG_twa_message_type {
 	unsigned int   code;
-	char*          text;
+	char           *text;
 } twa_message_type;
 
 /* AEN strings */

@@ -435,8 +435,8 @@ static twa_message_type twa_error_table[] = {
 
 /* request_id: 12, lun: 4 */
 #define TW_REQ_LUN_IN(lun, request_id)			\
-	(((lun << 12) & 0xf000) | (request_id & 0xfff))
-#define TW_LUN_OUT(lun) ((lun >> 12) & 0xf)
+	cpu_to_le16(((lun << 12) & 0xf000) | (request_id & 0xfff))
+#define TW_LUN_OUT(lun) ((le16_to_cpu(lun) >> 12) & 0xf)
 
 /* Macros */
 #define TW_CONTROL_REG_ADDR(x) (x->base_addr)

@@ -483,70 +483,75 @@ printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s.\n",a,b,c); \
 #define TW_APACHE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 72 : 109)
 #define TW_ESCALADE_MAX_SGL_LENGTH (sizeof(dma_addr_t) > 4 ? 41 : 62)
 #define TW_PADDING_LENGTH (sizeof(dma_addr_t) > 4 ? 8 : 0)
-#define TW_CPU_TO_SGL(x) (sizeof(dma_addr_t) > 4 ? cpu_to_le64(x) : cpu_to_le32(x))
 
 #pragma pack(1)
 
+#if IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT)
+typedef __le64 twa_addr_t;
+#define TW_CPU_TO_SGL(x)	cpu_to_le64(x)
+#else
+typedef __le32 twa_addr_t;
+#define TW_CPU_TO_SGL(x)	cpu_to_le32(x)
+#endif
+
 /* Scatter Gather List Entry */
 typedef struct TAG_TW_SG_Entry {
-	dma_addr_t address;
-	u32 length;
-} TW_SG_Entry;
+	twa_addr_t	address;
+	__le32		length;
+} __packed TW_SG_Entry;
 
 /* Command Packet */
 typedef struct TW_Command {
-	unsigned char opcode__sgloffset;
-	unsigned char size;
-	unsigned char request_id;
-	unsigned char unit__hostid;
+	u8		opcode__sgloffset;
+	u8		size;
+	u8		request_id;
+	u8		unit__hostid;
 	/* Second DWORD */
-	unsigned char status;
-	unsigned char flags;
+	u8		status;
+	u8		flags;
 	union {
-		unsigned short block_count;
-		unsigned short parameter_count;
+		__le16	block_count;
+		__le16	parameter_count;
 	} byte6_offset;
 	union {
 		struct {
-			u32 lba;
-			TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
-			dma_addr_t padding;
+			__le32		lba;
+			TW_SG_Entry	sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+			twa_addr_t	padding;
 		} io;
 		struct {
-			TW_SG_Entry sgl[TW_ESCALADE_MAX_SGL_LENGTH];
-			u32 padding;
-			dma_addr_t padding2;
+			TW_SG_Entry	sgl[TW_ESCALADE_MAX_SGL_LENGTH];
+			__le32		padding;
+			twa_addr_t	padding2;
 		} param;
 	} byte8_offset;
 } TW_Command;
 
 /* Command Packet for 9000+ controllers */
 typedef struct TAG_TW_Command_Apache {
-	unsigned char opcode__reserved;
-	unsigned char unit;
-	unsigned short request_id__lunl;
-	unsigned char status;
-	unsigned char sgl_offset;
-	unsigned short sgl_entries__lunh;
-	unsigned char cdb[16];
-	TW_SG_Entry sg_list[TW_APACHE_MAX_SGL_LENGTH];
-	unsigned char padding[TW_PADDING_LENGTH];
+	u8		opcode__reserved;
+	u8		unit;
+	__le16		request_id__lunl;
+	u8		status;
+	u8		sgl_offset;
+	__le16		sgl_entries__lunh;
+	u8		cdb[16];
+	TW_SG_Entry	sg_list[TW_APACHE_MAX_SGL_LENGTH];
+	u8		padding[TW_PADDING_LENGTH];
 } TW_Command_Apache;
 
 /* New command packet header */
 typedef struct TAG_TW_Command_Apache_Header {
 	unsigned char sense_data[TW_SENSE_DATA_LENGTH];
 	struct {
-		char reserved[4];
-		unsigned short error;
-		unsigned char padding;
-		unsigned char severity__reserved;
+		u8 reserved[4];
+		__le16 error;
+		u8 padding;
+		u8 severity__reserved;
 	} status_block;
 	unsigned char err_specific_desc[98];
 	struct {
-		unsigned char size_header;
-		unsigned short reserved;
-		unsigned char size_sense;
+		u8 size_header;
+		u8 reserved[2];
+		u8 size_sense;
 	} header_desc;
 } TW_Command_Apache_Header;

@@ -561,19 +566,19 @@ typedef struct TAG_TW_Command_Full {
 
 /* Initconnection structure */
 typedef struct TAG_TW_Initconnect {
-	unsigned char opcode__reserved;
-	unsigned char size;
-	unsigned char request_id;
-	unsigned char res2;
-	unsigned char status;
-	unsigned char flags;
-	unsigned short message_credits;
-	u32 features;
-	unsigned short fw_srl;
-	unsigned short fw_arch_id;
-	unsigned short fw_branch;
-	unsigned short fw_build;
-	u32 result;
+	u8		opcode__reserved;
+	u8		size;
+	u8		request_id;
+	u8		res2;
+	u8		status;
+	u8		flags;
+	__le16		message_credits;
+	__le32		features;
+	__le16		fw_srl;
+	__le16		fw_arch_id;
+	__le16		fw_branch;
+	__le16		fw_build;
+	__le32		result;
 } TW_Initconnect;
 
 /* Event info structure */

@@ -602,7 +607,7 @@ typedef struct TAG_TW_Ioctl_Apache {
 	TW_Ioctl_Driver_Command driver_command;
 	char padding[488];
 	TW_Command_Full firmware_command;
-	char data_buffer[1];
+	char data_buffer[];
 } TW_Ioctl_Buf_Apache;
 
 /* Lock structure for ioctl get/release lock */

@@ -614,11 +619,11 @@ typedef struct TAG_TW_Lock {
 
 /* GetParam descriptor */
 typedef struct {
-	unsigned short	table_id;
-	unsigned short	parameter_id;
-	unsigned short	parameter_size_bytes;
-	unsigned short	actual_parameter_size_bytes;
-	unsigned char	data[1];
+	__le16	table_id;
+	__le16	parameter_id;
+	__le16	parameter_size_bytes;
+	__le16	actual_parameter_size_bytes;
+	u8	data[];
 } TW_Param_Apache, *PTW_Param_Apache;
 
 /* Response queue */

@@ -645,8 +650,6 @@ typedef struct TAG_TW_Compatibility_Info
 	unsigned short fw_on_ctlr_build;
 } TW_Compatibility_Info;
 
-#pragma pack()
-
 typedef struct TAG_TW_Device_Extension {
 	u32 __iomem *base_addr;
 	unsigned long *generic_buffer_virt[TW_Q_LENGTH];
@@ -429,7 +429,7 @@ static int tw_decode_sense(TW_Device_Extension *tw_dev, int request_id, int fill
 				/* Additional sense code qualifier */
 				tw_dev->srb[request_id]->sense_buffer[13] = tw_sense_table[i][3];
 
-				tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+				tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
 				return TW_ISR_DONT_RESULT; /* Special case for isr to not over-write result */
 			}
 		}

@@ -1977,7 +1977,7 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
 		printk(KERN_NOTICE "3w-xxxx: scsi%d: Unknown scsi opcode: 0x%x\n", tw_dev->host->host_no, *command);
 		tw_dev->state[request_id] = TW_S_COMPLETED;
 		tw_state_request_finish(tw_dev, request_id);
-		scsi_build_sense_buffer(1, SCpnt->sense_buffer, ILLEGAL_REQUEST, 0x20, 0);
+		scsi_build_sense(SCpnt, 1, ILLEGAL_REQUEST, 0x20, 0);
 		done(SCpnt);
 		retval = 0;
 	}

@@ -2159,7 +2159,7 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
 				/* If error, command failed */
 				if (error == 1) {
 					/* Ask for a host reset */
-					tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
+					tw_dev->srb[request_id]->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
 				}
 
 				/* Now complete the io */
|
@ -978,10 +978,10 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
|
|||
if (NCR_700_get_tag_neg_state(SCp->device) == NCR_700_DURING_TAG_NEGOTIATION)
|
||||
NCR_700_set_tag_neg_state(SCp->device,
|
||||
NCR_700_FINISHED_TAG_NEGOTIATION);
|
||||
|
||||
|
||||
/* check for contingent allegiance conditions */
|
||||
if (hostdata->status[0] >> 1 == CHECK_CONDITION ||
|
||||
hostdata->status[0] >> 1 == COMMAND_TERMINATED) {
|
||||
if (hostdata->status[0] == SAM_STAT_CHECK_CONDITION ||
|
||||
hostdata->status[0] == SAM_STAT_COMMAND_TERMINATED) {
|
||||
struct NCR_700_command_slot *slot =
|
||||
(struct NCR_700_command_slot *)SCp->host_scribble;
|
||||
if(slot->flags == NCR_700_FLAG_AUTOSENSE) {
|
||||
|
|
|
@@ -40,7 +40,7 @@ struct sccb_mgr_info {
 	u16 si_per_targ_ultra_nego;
 	u16 si_per_targ_no_disc;
 	u16 si_per_targ_wide_nego;
-	u16 si_flags;
+	u16 si_mflags;
 	unsigned char si_card_family;
 	unsigned char si_bustype;
 	unsigned char si_card_model[3];

@@ -304,40 +304,12 @@ typedef struct SCCBscam_info {
 
 } SCCBSCAM_INFO;
 
-#define  SCSI_REQUEST_SENSE	0x03
-#define  SCSI_READ		0x08
-#define  SCSI_WRITE		0x0A
-#define  SCSI_START_STOP_UNIT	0x1B
-#define  SCSI_READ_EXTENDED	0x28
-#define  SCSI_WRITE_EXTENDED	0x2A
-#define  SCSI_WRITE_AND_VERIFY	0x2E
-
-#define  SSGOOD			0x00
-#define  SSCHECK		0x02
-#define  SSQ_FULL		0x28
-
-#define  SMCMD_COMP		0x00
-#define  SMEXT			0x01
-#define  SMSAVE_DATA_PTR	0x02
-#define  SMREST_DATA_PTR	0x03
-#define  SMDISC			0x04
-#define  SMABORT		0x06
-#define  SMREJECT		0x07
-#define  SMNO_OP		0x08
-#define  SMPARITY		0x09
-#define  SMDEV_RESET		0x0C
-#define  SMABORT_TAG		0x0D
-#define  SMINIT_RECOVERY	0x0F
-#define  SMREL_RECOVERY		0x10
-
 #define  SMIDENT		0x80
 #define  DISC_PRIV		0x40
 
-#define  SMSYNC			0x01
-#define  SMWDTR			0x03
 #define  SM8BIT			0x00
 #define  SM16BIT		0x01
-#define  SMIGNORWR		0x23	/* Ignore Wide Residue */
 
 #define  SIX_BYTE_CMD		0x06
 #define  TWELVE_BYTE_CMD	0x0C

@@ -1073,22 +1045,22 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
 		ScamFlg =
 		    (unsigned char)FPT_utilEERead(ioport, SCAM_CONFIG / 2);
 
-	pCardInfo->si_flags = 0x0000;
+	pCardInfo->si_mflags = 0x0000;
 
 	if (i & 0x01)
-		pCardInfo->si_flags |= SCSI_PARITY_ENA;
+		pCardInfo->si_mflags |= SCSI_PARITY_ENA;
 
 	if (!(i & 0x02))
-		pCardInfo->si_flags |= SOFT_RESET;
+		pCardInfo->si_mflags |= SOFT_RESET;
 
 	if (i & 0x10)
-		pCardInfo->si_flags |= EXTENDED_TRANSLATION;
+		pCardInfo->si_mflags |= EXTENDED_TRANSLATION;
 
 	if (ScamFlg & SCAM_ENABLED)
-		pCardInfo->si_flags |= FLAG_SCAM_ENABLED;
+		pCardInfo->si_mflags |= FLAG_SCAM_ENABLED;
 
 	if (ScamFlg & SCAM_LEVEL2)
-		pCardInfo->si_flags |= FLAG_SCAM_LEVEL2;
+		pCardInfo->si_mflags |= FLAG_SCAM_LEVEL2;
 
 	j = (RD_HARPOON(ioport + hp_bm_ctrl) & ~SCSI_TERM_ENA_L);
 	if (i & 0x04) {

@@ -1104,7 +1076,7 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
 
 	if (!(RD_HARPOON(ioport + hp_page_ctrl) & NARROW_SCSI_CARD))
 
-		pCardInfo->si_flags |= SUPPORT_16TAR_32LUN;
+		pCardInfo->si_mflags |= SUPPORT_16TAR_32LUN;
 
 	pCardInfo->si_card_family = HARPOON_FAMILY;
 	pCardInfo->si_bustype = BUSTYPE_PCI;

@@ -1140,15 +1112,15 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
 
 	if (pCardInfo->si_card_model[1] == '3') {
 		if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
-			pCardInfo->si_flags |= LOW_BYTE_TERM;
+			pCardInfo->si_mflags |= LOW_BYTE_TERM;
 	} else if (pCardInfo->si_card_model[2] == '0') {
 		temp = RD_HARPOON(ioport + hp_xfer_pad);
 		WR_HARPOON(ioport + hp_xfer_pad, (temp & ~BIT(4)));
 		if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
-			pCardInfo->si_flags |= LOW_BYTE_TERM;
+			pCardInfo->si_mflags |= LOW_BYTE_TERM;
 		WR_HARPOON(ioport + hp_xfer_pad, (temp | BIT(4)));
 		if (RD_HARPOON(ioport + hp_ee_ctrl) & BIT(7))
-			pCardInfo->si_flags |= HIGH_BYTE_TERM;
+			pCardInfo->si_mflags |= HIGH_BYTE_TERM;
 		WR_HARPOON(ioport + hp_xfer_pad, temp);
 	} else {
 		temp = RD_HARPOON(ioport + hp_ee_ctrl);

@@ -1166,9 +1138,9 @@ static int FlashPoint_ProbeHostAdapter(struct sccb_mgr_info *pCardInfo)
 		WR_HARPOON(ioport + hp_ee_ctrl, temp);
 		WR_HARPOON(ioport + hp_xfer_pad, temp2);
 		if (!(temp3 & BIT(7)))
-			pCardInfo->si_flags |= LOW_BYTE_TERM;
+			pCardInfo->si_mflags |= LOW_BYTE_TERM;
 		if (!(temp3 & BIT(6)))
-			pCardInfo->si_flags |= HIGH_BYTE_TERM;
+			pCardInfo->si_mflags |= HIGH_BYTE_TERM;
 	}
 
 	ARAM_ACCESS(ioport);

@@ -1275,7 +1247,7 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
 	WR_HARPOON(ioport + hp_arb_id, pCardInfo->si_id);
 	CurrCard->ourId = pCardInfo->si_id;
 
-	i = (unsigned char)pCardInfo->si_flags;
+	i = (unsigned char)pCardInfo->si_mflags;
 	if (i & SCSI_PARITY_ENA)
 		WR_HARPOON(ioport + hp_portctrl_1, (HOST_MODE8 | CHK_SCSI_P));
 

@@ -1289,14 +1261,14 @@ static void *FlashPoint_HardwareResetHostAdapter(struct sccb_mgr_info
 		j |= SCSI_TERM_ENA_H;
 	WR_HARPOON(ioport + hp_ee_ctrl, j);
 
-	if (!(pCardInfo->si_flags & SOFT_RESET)) {
+	if (!(pCardInfo->si_mflags & SOFT_RESET)) {
 
 		FPT_sresb(ioport, thisCard);
 
 		FPT_scini(thisCard, pCardInfo->si_id, 0);
 	}
 
-	if (pCardInfo->si_flags & POST_ALL_UNDERRRUNS)
+	if (pCardInfo->si_mflags & POST_ALL_UNDERRRUNS)
 		CurrCard->globalFlags |= F_NO_FILTER;
 
 	if (pCurrNvRam) {

@@ -1660,7 +1632,7 @@ static int FlashPoint_AbortCCB(void *pCurrCard, struct sccb *p_Sccb)
 					p_Sccb->Sccb_scsistat =
 					    ABORT_ST;
 					p_Sccb->Sccb_scsimsg =
-					    SMABORT_TAG;
+					    ABORT_TASK;
 
 					if (((struct sccb_card *)
 					     pCurrCard)->currentSCCB ==

@@ -1812,7 +1784,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
 				FPT_phaseChkFifo(ioport, thisCard);
 
 				if (RD_HARPOON(ioport + hp_gp_reg_1) ==
-				    SMSAVE_DATA_PTR) {
+				    SAVE_POINTERS) {
 
 					WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
 					currSCCB->Sccb_XferState |= F_NO_DATA_YET;

@@ -1865,7 +1837,7 @@ static int FlashPoint_HandleInterrupt(void *pcard)
 					FPT_phaseChkFifo(ioport, thisCard);
 
 				if (RD_HARPOON(ioport + hp_gp_reg_1) ==
-				    SMSAVE_DATA_PTR) {
+				    SAVE_POINTERS) {
 					WR_HARPOON(ioport + hp_gp_reg_1, 0x00);
 					currSCCB->Sccb_XferState |=
 					    F_NO_DATA_YET;

@@ -2258,7 +2230,7 @@ static unsigned char FPT_sfm(u32 port, struct sccb *pCurrSCCB)
 		WR_HARPOON(port + hp_fiforead, 0);
 		WR_HARPOON(port + hp_fifowrite, 0);
 		if (pCurrSCCB != NULL) {
-			pCurrSCCB->Sccb_scsimsg = SMPARITY;
+			pCurrSCCB->Sccb_scsimsg = MSG_PARITY_ERROR;
 		}
 		message = 0x00;
 		do {

@@ -2411,7 +2383,7 @@ static void FPT_ssel(u32 port, unsigned char p_card)
 
 		WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + NP);
 
-		currSCCB->Sccb_scsimsg = SMDEV_RESET;
+		currSCCB->Sccb_scsimsg = TARGET_RESET;
 
 		WR_HARPOON(port + hp_autostart_3, (SELECT + SELCHK_STRT));
 		auto_loaded = 1;

@@ -2758,9 +2730,9 @@ static void FPT_sres(u32 port, unsigned char p_card,
 		if (message == 0) {
 			msgRetryCount++;
 			if (msgRetryCount == 1) {
-				FPT_SendMsg(port, SMPARITY);
+				FPT_SendMsg(port, MSG_PARITY_ERROR);
 			} else {
-				FPT_SendMsg(port, SMDEV_RESET);
+				FPT_SendMsg(port, TARGET_RESET);
 
 				FPT_sssyncv(port, our_target, NARROW_SCSI,
 					    currTar_Info);

@@ -2860,8 +2832,8 @@ static void FPT_SendMsg(u32 port, unsigned char message)
 
 	WR_HARPOON(port + hp_portctrl_0, 0x00);
 
-	if ((message == SMABORT) || (message == SMDEV_RESET) ||
-	    (message == SMABORT_TAG)) {
+	if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) ||
+	    (message == ABORT_TASK)) {
 		while (!
 		       (RDW_HARPOON((port + hp_intstat)) &
 			(BUS_FREE | PHASE))) {

@@ -2893,7 +2865,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
 
 	currTar_Info = &FPT_sccbMgrTbl[p_card][currSCCB->TargID];
 
-	if (message == SMREST_DATA_PTR) {
+	if (message == RESTORE_POINTERS) {
 		if (!(currSCCB->Sccb_XferState & F_NO_DATA_YET)) {
 			currSCCB->Sccb_ATC = currSCCB->Sccb_savedATC;
 

@@ -2905,7 +2877,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
 			   (AUTO_IMMED + DISCONNECT_START));
 	}
 
-	else if (message == SMCMD_COMP) {
+	else if (message == COMMAND_COMPLETE) {
 
 		if (currSCCB->Sccb_scsistat == SELECT_Q_ST) {
 			currTar_Info->TarStatus &=

@@ -2917,15 +2889,16 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
 
 	}
 
-	else if ((message == SMNO_OP) || (message >= SMIDENT)
-		 || (message == SMINIT_RECOVERY) || (message == SMREL_RECOVERY)) {
+	else if ((message == NOP) || (message >= IDENTIFY_BASE) ||
+		 (message == INITIATE_RECOVERY) ||
+		 (message == RELEASE_RECOVERY)) {
 
 		ACCEPT_MSG(port);
 		WR_HARPOON(port + hp_autostart_1,
 			   (AUTO_IMMED + DISCONNECT_START));
 	}
 
-	else if (message == SMREJECT) {
+	else if (message == MESSAGE_REJECT) {
 
 		if ((currSCCB->Sccb_scsistat == SELECT_SN_ST) ||
 		    (currSCCB->Sccb_scsistat == SELECT_WN_ST) ||

@@ -3026,19 +2999,19 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
 		}
 	}
 
-	else if (message == SMEXT) {
+	else if (message == EXTENDED_MESSAGE) {
 
 		ACCEPT_MSG(port);
 		FPT_shandem(port, p_card, currSCCB);
 	}
 
-	else if (message == SMIGNORWR) {
+	else if (message == IGNORE_WIDE_RESIDUE) {
 
 		ACCEPT_MSG(port);	/* ACK the RESIDUE MSG */
 
 		message = FPT_sfm(port, currSCCB);
 
-		if (currSCCB->Sccb_scsimsg != SMPARITY)
+		if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
 			ACCEPT_MSG(port);
 		WR_HARPOON(port + hp_autostart_1,
 			   (AUTO_IMMED + DISCONNECT_START));

@@ -3047,7 +3020,7 @@ static void FPT_sdecm(unsigned char message, u32 port, unsigned char p_card)
 	else {
 
 		currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
-		currSCCB->Sccb_scsimsg = SMREJECT;
+		currSCCB->Sccb_scsimsg = MESSAGE_REJECT;
 
 		ACCEPT_MSG_ATN(port);
 		WR_HARPOON(port + hp_autostart_1,

@@ -3073,7 +3046,7 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
@ -3073,7 +3046,7 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
|
|||
message = FPT_sfm(port, pCurrSCCB);
|
||||
if (message) {
|
||||
|
||||
if (message == SMSYNC) {
|
||||
if (message == EXTENDED_SDTR) {
|
||||
|
||||
if (length == 0x03) {
|
||||
|
||||
|
@ -3081,10 +3054,10 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
|
|||
FPT_stsyncn(port, p_card);
|
||||
} else {
|
||||
|
||||
pCurrSCCB->Sccb_scsimsg = SMREJECT;
|
||||
pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
|
||||
ACCEPT_MSG_ATN(port);
|
||||
}
|
||||
} else if (message == SMWDTR) {
|
||||
} else if (message == EXTENDED_WDTR) {
|
||||
|
||||
if (length == 0x02) {
|
||||
|
||||
|
@ -3092,7 +3065,7 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
|
|||
FPT_stwidn(port, p_card);
|
||||
} else {
|
||||
|
||||
pCurrSCCB->Sccb_scsimsg = SMREJECT;
|
||||
pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
|
||||
ACCEPT_MSG_ATN(port);
|
||||
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
|
@ -3101,20 +3074,20 @@ static void FPT_shandem(u32 port, unsigned char p_card, struct sccb *pCurrSCCB)
|
|||
}
|
||||
} else {
|
||||
|
||||
pCurrSCCB->Sccb_scsimsg = SMREJECT;
|
||||
pCurrSCCB->Sccb_scsimsg = MESSAGE_REJECT;
|
||||
ACCEPT_MSG_ATN(port);
|
||||
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
(AUTO_IMMED + DISCONNECT_START));
|
||||
}
|
||||
} else {
|
||||
if (pCurrSCCB->Sccb_scsimsg != SMPARITY)
|
||||
if (pCurrSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
|
||||
ACCEPT_MSG(port);
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
(AUTO_IMMED + DISCONNECT_START));
|
||||
}
|
||||
} else {
|
||||
if (pCurrSCCB->Sccb_scsimsg == SMPARITY)
|
||||
if (pCurrSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
(AUTO_IMMED + DISCONNECT_START));
|
||||
}
|
||||
|
@ -3148,10 +3121,10 @@ static unsigned char FPT_sisyncn(u32 port, unsigned char p_card,
|
|||
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
|
||||
|
||||
WRW_HARPOON((port + SYNC_MSGS + 0),
|
||||
(MPM_OP + AMSG_OUT + SMEXT));
|
||||
(MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 4),
|
||||
(MPM_OP + AMSG_OUT + SMSYNC));
|
||||
(MPM_OP + AMSG_OUT + EXTENDED_SDTR));
|
||||
|
||||
if ((currTar_Info->TarEEValue & EE_SYNC_MASK) == EE_SYNC_20MB)
|
||||
|
||||
|
@ -3221,7 +3194,7 @@ static void FPT_stsyncn(u32 port, unsigned char p_card)
|
|||
|
||||
sync_msg = FPT_sfm(port, currSCCB);
|
||||
|
||||
if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
|
||||
if ((sync_msg == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
(AUTO_IMMED + DISCONNECT_START));
|
||||
return;
|
||||
|
@ -3231,7 +3204,7 @@ static void FPT_stsyncn(u32 port, unsigned char p_card)
|
|||
|
||||
offset = FPT_sfm(port, currSCCB);
|
||||
|
||||
if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
|
||||
if ((offset == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
(AUTO_IMMED + DISCONNECT_START));
|
||||
return;
|
||||
|
@ -3343,9 +3316,11 @@ static void FPT_sisyncr(u32 port, unsigned char sync_pulse,
|
|||
unsigned char offset)
|
||||
{
|
||||
ARAM_ACCESS(port);
|
||||
WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 0),
|
||||
(MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x03));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMSYNC));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 4),
|
||||
(MPM_OP + AMSG_OUT + EXTENDED_SDTR));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 6), (MPM_OP + AMSG_OUT + sync_pulse));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 8), (RAT_OP));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 10), (MPM_OP + AMSG_OUT + offset));
|
||||
|
@ -3388,10 +3363,10 @@ static unsigned char FPT_siwidn(u32 port, unsigned char p_card)
|
|||
WRW_HARPOON((port + ID_MSG_STRT + 2), BRH_OP + ALWAYS + CMDPZ);
|
||||
|
||||
WRW_HARPOON((port + SYNC_MSGS + 0),
|
||||
(MPM_OP + AMSG_OUT + SMEXT));
|
||||
(MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 4),
|
||||
(MPM_OP + AMSG_OUT + SMWDTR));
|
||||
(MPM_OP + AMSG_OUT + EXTENDED_WDTR));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 8),
|
||||
(MPM_OP + AMSG_OUT + SM16BIT));
|
||||
|
@ -3436,7 +3411,7 @@ static void FPT_stwidn(u32 port, unsigned char p_card)
|
|||
|
||||
width = FPT_sfm(port, currSCCB);
|
||||
|
||||
if ((width == 0x00) && (currSCCB->Sccb_scsimsg == SMPARITY)) {
|
||||
if ((width == 0x00) && (currSCCB->Sccb_scsimsg == MSG_PARITY_ERROR)) {
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
(AUTO_IMMED + DISCONNECT_START));
|
||||
return;
|
||||
|
@ -3499,9 +3474,11 @@ static void FPT_stwidn(u32 port, unsigned char p_card)
|
|||
static void FPT_siwidr(u32 port, unsigned char width)
|
||||
{
|
||||
ARAM_ACCESS(port);
|
||||
WRW_HARPOON((port + SYNC_MSGS + 0), (MPM_OP + AMSG_OUT + SMEXT));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 0),
|
||||
(MPM_OP + AMSG_OUT + EXTENDED_MESSAGE));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 2), (MPM_OP + AMSG_OUT + 0x02));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 4), (MPM_OP + AMSG_OUT + SMWDTR));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 4),
|
||||
(MPM_OP + AMSG_OUT + EXTENDED_WDTR));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 6), (RAT_OP));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 8), (MPM_OP + AMSG_OUT + width));
|
||||
WRW_HARPOON((port + SYNC_MSGS + 10), (BRH_OP + ALWAYS + NP));
|
||||
|
@ -3682,7 +3659,7 @@ static void FPT_ssenss(struct sccb_card *pCurrCard)
|
|||
}
|
||||
|
||||
currSCCB->CdbLength = SIX_BYTE_CMD;
|
||||
currSCCB->Cdb[0] = SCSI_REQUEST_SENSE;
|
||||
currSCCB->Cdb[0] = REQUEST_SENSE;
|
||||
currSCCB->Cdb[1] = currSCCB->Cdb[1] & (unsigned char)0xE0; /*Keep LUN. */
|
||||
currSCCB->Cdb[2] = 0x00;
|
||||
currSCCB->Cdb[3] = 0x00;
|
||||
|
@ -3939,13 +3916,9 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
|
|||
*/
|
||||
if ((currTar_Info->TarStatus & TAR_ALLOW_DISC) ||
|
||||
(currTar_Info->TarStatus & TAG_Q_TRYING)) {
|
||||
p_sccb->Sccb_idmsg =
|
||||
(unsigned char)(SMIDENT | DISC_PRIV) | p_sccb->Lun;
|
||||
}
|
||||
|
||||
else {
|
||||
|
||||
p_sccb->Sccb_idmsg = (unsigned char)SMIDENT | p_sccb->Lun;
|
||||
p_sccb->Sccb_idmsg = IDENTIFY(true, p_sccb->Lun);
|
||||
} else {
|
||||
p_sccb->Sccb_idmsg = IDENTIFY(false, p_sccb->Lun);
|
||||
}
|
||||
|
||||
p_sccb->HostStatus = 0x00;
|
||||
|
@ -3962,7 +3935,7 @@ static void FPT_sinits(struct sccb *p_sccb, unsigned char p_card)
|
|||
*/
|
||||
p_sccb->Sccb_scsistat = BUS_FREE_ST;
|
||||
p_sccb->SccbStatus = SCCB_IN_PROCESS;
|
||||
p_sccb->Sccb_scsimsg = SMNO_OP;
|
||||
p_sccb->Sccb_scsimsg = NOP;
|
||||
|
||||
}
|
||||
|
||||
|
@ -4167,7 +4140,7 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
|
|||
message = currSCCB->Sccb_scsimsg;
|
||||
scsiID = currSCCB->TargID;
|
||||
|
||||
if (message == SMDEV_RESET) {
|
||||
if (message == TARGET_RESET) {
|
||||
|
||||
currTar_Info = &FPT_sccbMgrTbl[p_card][scsiID];
|
||||
currTar_Info->TarSyncCtrl = 0;
|
||||
|
@ -4203,7 +4176,7 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
|
|||
|
||||
else if (currSCCB->Sccb_scsistat < COMMAND_ST) {
|
||||
|
||||
if (message == SMNO_OP) {
|
||||
if (message == NOP) {
|
||||
currSCCB->Sccb_MGRFlags |= F_DEV_SELECTED;
|
||||
|
||||
FPT_ssel(port, p_card);
|
||||
|
@ -4211,13 +4184,13 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
|
|||
}
|
||||
} else {
|
||||
|
||||
if (message == SMABORT)
|
||||
if (message == ABORT_TASK_SET)
|
||||
|
||||
FPT_queueFlushSccb(p_card, SCCB_COMPLETE);
|
||||
}
|
||||
|
||||
} else {
|
||||
message = SMABORT;
|
||||
message = ABORT_TASK_SET;
|
||||
}
|
||||
|
||||
WRW_HARPOON((port + hp_intstat), (BUS_FREE | PHASE | XFER_CNT_0));
|
||||
|
@ -4232,8 +4205,8 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
|
|||
|
||||
WR_HARPOON(port + hp_portctrl_0, 0x00);
|
||||
|
||||
if ((message == SMABORT) || (message == SMDEV_RESET) ||
|
||||
(message == SMABORT_TAG)) {
|
||||
if ((message == ABORT_TASK_SET) || (message == TARGET_RESET) ||
|
||||
(message == ABORT_TASK)) {
|
||||
|
||||
while (!(RDW_HARPOON((port + hp_intstat)) & (BUS_FREE | PHASE))) {
|
||||
}
|
||||
|
@ -4275,8 +4248,8 @@ static void FPT_phaseMsgOut(u32 port, unsigned char p_card)
|
|||
|
||||
else {
|
||||
|
||||
if (message == SMPARITY) {
|
||||
currSCCB->Sccb_scsimsg = SMNO_OP;
|
||||
if (message == MSG_PARITY_ERROR) {
|
||||
currSCCB->Sccb_scsimsg = NOP;
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
(AUTO_IMMED + DISCONNECT_START));
|
||||
} else {
|
||||
|
@ -4306,7 +4279,7 @@ static void FPT_phaseMsgIn(u32 port, unsigned char p_card)
|
|||
}
|
||||
|
||||
message = RD_HARPOON(port + hp_scsidata_0);
|
||||
if ((message == SMDISC) || (message == SMSAVE_DATA_PTR)) {
|
||||
if ((message == DISCONNECT) || (message == SAVE_POINTERS)) {
|
||||
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
(AUTO_IMMED + END_DATA_START));
|
||||
|
@ -4321,7 +4294,7 @@ static void FPT_phaseMsgIn(u32 port, unsigned char p_card)
|
|||
FPT_sdecm(message, port, p_card);
|
||||
|
||||
} else {
|
||||
if (currSCCB->Sccb_scsimsg != SMPARITY)
|
||||
if (currSCCB->Sccb_scsimsg != MSG_PARITY_ERROR)
|
||||
ACCEPT_MSG(port);
|
||||
WR_HARPOON(port + hp_autostart_1,
|
||||
(AUTO_IMMED + DISCONNECT_START));
|
||||
|
@ -4351,7 +4324,7 @@ static void FPT_phaseIllegal(u32 port, unsigned char p_card)
|
|||
|
||||
currSCCB->HostStatus = SCCB_PHASE_SEQUENCE_FAIL;
|
||||
currSCCB->Sccb_scsistat = ABORT_ST;
|
||||
currSCCB->Sccb_scsimsg = SMABORT;
|
||||
currSCCB->Sccb_scsimsg = ABORT_TASK_SET;
|
||||
}
|
||||
|
||||
ACCEPT_MSG_ATN(port);
|
||||
|
@ -4650,9 +4623,9 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
|
|||
|
||||
FPT_sccbMgrTbl[p_card][currSCCB->TargID].TarLUN_CA = 0;
|
||||
|
||||
if (status_byte != SSGOOD) {
|
||||
if (status_byte != SAM_STAT_GOOD) {
|
||||
|
||||
if (status_byte == SSQ_FULL) {
|
||||
if (status_byte == SAM_STAT_TASK_SET_FULL) {
|
||||
|
||||
if (((FPT_BL_Card[p_card].globalFlags & F_CONLUN_IO) &&
|
||||
((FPT_sccbMgrTbl[p_card][currSCCB->TargID].
|
||||
|
@ -4784,7 +4757,7 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
|
|||
|
||||
}
|
||||
|
||||
if (status_byte == SSCHECK) {
|
||||
if (status_byte == SAM_STAT_CHECK_CONDITION) {
|
||||
if (FPT_BL_Card[p_card].globalFlags & F_DO_RENEGO) {
|
||||
if (FPT_sccbMgrTbl[p_card][currSCCB->TargID].
|
||||
TarEEValue & EE_SYNC_MASK) {
|
||||
|
@ -4806,7 +4779,7 @@ static void FPT_autoCmdCmplt(u32 p_port, unsigned char p_card)
|
|||
currSCCB->SccbStatus = SCCB_ERROR;
|
||||
currSCCB->TargetStatus = status_byte;
|
||||
|
||||
if (status_byte == SSCHECK) {
|
||||
if (status_byte == SAM_STAT_CHECK_CONDITION) {
|
||||
|
||||
FPT_sccbMgrTbl[p_card][currSCCB->TargID].
|
||||
TarLUN_CA = 1;
|
||||
|
@ -6868,14 +6841,14 @@ static void FPT_queueCmdComplete(struct sccb_card *pCurrCard,
|
|||
if ((p_sccb->
|
||||
ControlByte & (SCCB_DATA_XFER_OUT | SCCB_DATA_XFER_IN))
|
||||
&& (p_sccb->HostStatus == SCCB_COMPLETE)
|
||||
&& (p_sccb->TargetStatus != SSCHECK))
|
||||
&& (p_sccb->TargetStatus != SAM_STAT_CHECK_CONDITION))
|
||||
|
||||
if ((SCSIcmd == SCSI_READ) ||
|
||||
(SCSIcmd == SCSI_WRITE) ||
|
||||
(SCSIcmd == SCSI_READ_EXTENDED) ||
|
||||
(SCSIcmd == SCSI_WRITE_EXTENDED) ||
|
||||
(SCSIcmd == SCSI_WRITE_AND_VERIFY) ||
|
||||
(SCSIcmd == SCSI_START_STOP_UNIT) ||
|
||||
if ((SCSIcmd == READ_6) ||
|
||||
(SCSIcmd == WRITE_6) ||
|
||||
(SCSIcmd == READ_10) ||
|
||||
(SCSIcmd == WRITE_10) ||
|
||||
(SCSIcmd == WRITE_VERIFY) ||
|
||||
(SCSIcmd == START_STOP) ||
|
||||
(pCurrCard->globalFlags & F_NO_FILTER)
|
||||
)
|
||||
p_sccb->HostStatus = SCCB_DATA_UNDER_RUN;
|
||||
|
|
|
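The FlashPoint hunks above are largely mechanical renames: the driver's private SM* message defines and bit-shifted SS* status defines give way to the generic constants from <scsi/scsi.h> and <scsi/scsi_proto.h>. As a reference sketch (the SM*/SS* names are the old driver-local defines; the values follow the SCSI message codes):

/* SMCMD_COMP      -> COMMAND_COMPLETE    (0x00) */
/* SMEXT           -> EXTENDED_MESSAGE    (0x01) */
/* SMSAVE_DATA_PTR -> SAVE_POINTERS       (0x02) */
/* SMREST_DATA_PTR -> RESTORE_POINTERS    (0x03) */
/* SMDISC          -> DISCONNECT          (0x04) */
/* SMABORT         -> ABORT_TASK_SET      (0x06) */
/* SMREJECT        -> MESSAGE_REJECT      (0x07) */
/* SMNO_OP         -> NOP                 (0x08) */
/* SMPARITY        -> MSG_PARITY_ERROR    (0x09) */
/* SMDEV_RESET     -> TARGET_RESET        (0x0c) */
/* SMABORT_TAG     -> ABORT_TASK          (0x0d) */
/* SMIDENT         -> IDENTIFY_BASE       (0x80) */

/* The IDENTIFY() helper now used in FPT_sinits() builds the identify
 * message byte from the base value, the disconnect-privilege bit and
 * the LUN; roughly: */
#define IDENTIFY(can_disconnect, lun) \
	(IDENTIFY_BASE | ((can_disconnect) ? 0x40 : 0x0) | ((lun) & 0x07))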
@@ -235,6 +235,19 @@ config SCSI_FC_ATTRS
each attached FiberChannel device to sysfs, say Y.
Otherwise, say N.

config FC_APPID
bool "Enable support to track FC I/O Traffic"
depends on BLOCK && BLK_CGROUP
depends on SCSI
select BLK_CGROUP_FC_APPID
default y
help
If you say Y here, it enables the support to track
FC I/O traffic over fabric. It enables the Fabric and the
storage targets to identify, monitor, and handle FC traffic
based on VM tags by inserting application specific
identification into the FC frame.

config SCSI_ISCSI_ATTRS
tristate "iSCSI Transport Attributes"
depends on SCSI && NET

@@ -311,7 +324,7 @@ source "drivers/scsi/cxlflash/Kconfig"
config SGIWD93_SCSI
tristate "SGI WD93C93 SCSI Driver"
depends on SGI_HAS_WD93 && SCSI
help
help
If you have a Western Digital WD93 SCSI controller on
an SGI MIPS system, say Y. Otherwise, say N.

@@ -482,6 +495,7 @@ config SCSI_ARCMSR
source "drivers/scsi/esas2r/Kconfig"
source "drivers/scsi/megaraid/Kconfig.megaraid"
source "drivers/scsi/mpt3sas/Kconfig"
source "drivers/scsi/mpi3mr/Kconfig"
source "drivers/scsi/smartpqi/Kconfig"
source "drivers/scsi/ufs/Kconfig"

@@ -1157,6 +1171,8 @@ config SCSI_LPFC_DEBUG_FS
This makes debugging information from the lpfc driver
available via the debugfs filesystem.

source "drivers/scsi/elx/Kconfig"

config SCSI_SIM710
tristate "Simple 53c710 SCSI support (Compaq, NCR machines)"
depends on EISA && SCSI
@@ -85,6 +85,7 @@ obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o
obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/
obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/
obj-$(CONFIG_SCSI_LPFC) += lpfc/
obj-$(CONFIG_SCSI_EFCT) += elx/
obj-$(CONFIG_SCSI_BFA_FC) += bfa/
obj-$(CONFIG_SCSI_CHELSIO_FCOE) += csiostor/
obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o

@@ -99,6 +100,7 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
obj-$(CONFIG_MEGARAID_SAS) += megaraid/
obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas/
obj-$(CONFIG_SCSI_MPI3MR) += mpi3mr/
obj-$(CONFIG_SCSI_UFSHCD) += ufs/
obj-$(CONFIG_SCSI_ACARD) += atp870u.o
obj-$(CONFIG_SCSI_SUNESP) += esp_scsi.o sun_esp.o
@@ -538,11 +538,11 @@ static void complete_cmd(struct Scsi_Host *instance,

if (hostdata->sensing == cmd) {
/* Autosense processing ends here */
if (status_byte(cmd->result) != GOOD) {
if (get_status_byte(cmd) != SAM_STAT_GOOD) {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
} else {
scsi_eh_restore_cmnd(cmd, &hostdata->ses);
set_driver_byte(cmd, DRIVER_SENSE);
set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
}
hostdata->sensing = NULL;
}

@@ -1815,6 +1815,8 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)

switch (tmp) {
case ABORT:
set_host_byte(cmd, DID_ABORT);
fallthrough;
case COMMAND_COMPLETE:
/* Accept message by clearing ACK */
sink = 1;

@@ -1826,9 +1828,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance)
hostdata->connected = NULL;
hostdata->busy[scmd_id(cmd)] &= ~(1 << cmd->device->lun);

cmd->result &= ~0xffff;
cmd->result |= cmd->SCp.Status;
cmd->result |= cmd->SCp.Message << 8;
set_status_byte(cmd, cmd->SCp.Status);

set_resid_from_SCp(cmd);
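The NCR5380 conversion illustrates the core change of the series: drivers stop composing cmd->result by hand and stop using the old bit-shifted status macros. A minimal sketch of the before/after semantics (illustrative helpers, not the kernel source verbatim):

#include <scsi/scsi_cmnd.h>

/* old, now removed: status_byte() shifted the SAM status right by one,
 * so the shifted GOOD/CHECK_CONDITION values (0x00/0x01) never matched
 * the unshifted SAM_STAT_* constants */
static inline unsigned char old_status_byte(int result)
{
	return (result >> 1) & 0x7f;
}

/* new: the SAM status byte is stored and read unmodified, so drivers
 * compare against SAM_STAT_GOOD (0x00), SAM_STAT_CHECK_CONDITION (0x02),
 * etc., through get_status_byte()/set_status_byte() */
static inline unsigned char new_status_byte(int result)
{
	return result & 0xff;
}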
@@ -1235,8 +1235,8 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
if (ret < 0)
return ret;
command = ContainerRawIo2;
fibsize = sizeof(struct aac_raw_io2) +
((le32_to_cpu(readcmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
fibsize = struct_size(readcmd2, sge,
le32_to_cpu(readcmd2->sgeCnt));
} else {
struct aac_raw_io *readcmd;
readcmd = (struct aac_raw_io *) fib_data(fib);

@@ -1366,8 +1366,8 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
if (ret < 0)
return ret;
command = ContainerRawIo2;
fibsize = sizeof(struct aac_raw_io2) +
((le32_to_cpu(writecmd2->sgeCnt)-1) * sizeof(struct sge_ieee1212));
fibsize = struct_size(writecmd2, sge,
le32_to_cpu(writecmd2->sgeCnt));
} else {
struct aac_raw_io *writecmd;
writecmd = (struct aac_raw_io *) fib_data(fib);

@@ -3998,7 +3998,7 @@ static int aac_convert_sgraw2(struct aac_raw_io2 *rio2, int pages, int nseg, int
if (aac_convert_sgl == 0)
return 0;

sge = kmalloc_array(nseg_new, sizeof(struct sge_ieee1212), GFP_ATOMIC);
sge = kmalloc_array(nseg_new, sizeof(*sge), GFP_ATOMIC);
if (sge == NULL)
return -ENOMEM;
@@ -1929,7 +1929,7 @@ struct aac_raw_io2 {
u8 bpComplete; /* reserved for F/W use */
u8 sgeFirstIndex; /* reserved for F/W use */
u8 unused[4];
struct sge_ieee1212 sge[1];
struct sge_ieee1212 sge[];
};

#define CT_FLUSH_CACHE 129
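The two aacraid changes work together: once sge[1] becomes a proper flexible array member, the request size can be computed with struct_size() from <linux/overflow.h> instead of the off-by-one-prone sizeof arithmetic. A hypothetical illustration of the pattern (struct and field names invented):

#include <linux/overflow.h>
#include <linux/types.h>

struct demo_sge {
	u64 addr;
	u32 len;
	u32 flags;
};

struct demo_io {
	u32 sge_cnt;
	struct demo_sge sge[];	/* was sge[1] */
};

/* old: sizeof(struct demo_io) already counted one trailing element, so
 * the code had to subtract one:
 *   size = sizeof(struct demo_io) + (cnt - 1) * sizeof(struct demo_sge);
 * new: overflow-checked and correct even for cnt == 0:
 *   size = struct_size(io, sge, cnt);
 */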
@@ -5964,7 +5964,6 @@ static void adv_isr_callback(ADV_DVC_VAR *adv_dvc_varp, ADV_SCSI_REQ_Q *scsiqp)
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
set_driver_byte(scp, DRIVER_SENSE);
}
break;

@@ -6715,7 +6714,6 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
ASC_DBG(2, "SAM_STAT_CHECK_CONDITION\n");
ASC_DBG_PRT_SENSE(2, scp->sense_buffer,
SCSI_SENSE_BUFFERSIZE);
set_driver_byte(scp, DRIVER_SENSE);
}
break;

@@ -6730,14 +6728,12 @@ static void asc_isr_callback(ASC_DVC_VAR *asc_dvc_varp, ASC_QDONE_INFO *qdonep)
case QD_ABORTED_BY_HOST:
ASC_DBG(1, "QD_ABORTED_BY_HOST\n");
set_status_byte(scp, qdonep->d3.scsi_stat);
set_msg_byte(scp, qdonep->d3.scsi_msg);
set_host_byte(scp, DID_ABORT);
break;

default:
ASC_DBG(1, "done_stat 0x%x\n", qdonep->d3.done_stat);
set_status_byte(scp, qdonep->d3.scsi_stat);
set_msg_byte(scp, qdonep->d3.scsi_msg);
set_host_byte(scp, DID_ERROR);
break;
}
@@ -619,7 +619,8 @@ static struct {
static irqreturn_t intr(int irq, void *dev_id);
static void reset_ports(struct Scsi_Host *shpnt);
static void aha152x_error(struct Scsi_Host *shpnt, char *msg);
static void done(struct Scsi_Host *shpnt, int error);
static void done(struct Scsi_Host *shpnt, unsigned char status_byte,
unsigned char host_byte);

/* diagnostics */
static void show_command(struct scsi_cmnd * ptr);

@@ -1271,7 +1272,8 @@ static int aha152x_biosparam(struct scsi_device *sdev, struct block_device *bdev
* Internal done function
*
*/
static void done(struct Scsi_Host *shpnt, int error)
static void done(struct Scsi_Host *shpnt, unsigned char status_byte,
unsigned char host_byte)
{
if (CURRENT_SC) {
if(DONE_SC)

@@ -1281,7 +1283,8 @@ static void done(struct Scsi_Host *shpnt, int error)

DONE_SC = CURRENT_SC;
CURRENT_SC = NULL;
DONE_SC->result = error;
set_status_byte(DONE_SC, status_byte);
set_host_byte(DONE_SC, host_byte);
} else
printk(KERN_ERR "aha152x: done() called outside of command\n");
}

@@ -1376,13 +1379,13 @@ static void busfree_run(struct Scsi_Host *shpnt)

if(CURRENT_SC->SCp.phase & completed) {
/* target sent COMMAND COMPLETE */
done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16));
done(shpnt, CURRENT_SC->SCp.Status, DID_OK);

} else if(CURRENT_SC->SCp.phase & aborted) {
done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_ABORT << 16));
done(shpnt, CURRENT_SC->SCp.Status, DID_ABORT);

} else if(CURRENT_SC->SCp.phase & resetted) {
done(shpnt, (CURRENT_SC->SCp.Status & 0xff) | ((CURRENT_SC->SCp.Message & 0xff) << 8) | (DID_RESET << 16));
done(shpnt, CURRENT_SC->SCp.Status, DID_RESET);

} else if(CURRENT_SC->SCp.phase & disconnected) {
/* target sent DISCONNECT */

@@ -1394,7 +1397,7 @@ static void busfree_run(struct Scsi_Host *shpnt)
CURRENT_SC = NULL;

} else {
done(shpnt, DID_ERROR << 16);
done(shpnt, SAM_STAT_GOOD, DID_ERROR);
}
#if defined(AHA152X_STAT)
} else {

@@ -1515,7 +1518,7 @@ static void seldo_run(struct Scsi_Host *shpnt)
if (TESTLO(SSTAT0, SELDO)) {
scmd_printk(KERN_ERR, CURRENT_SC,
"aha152x: passing bus free condition\n");
done(shpnt, DID_NO_CONNECT << 16);
done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT);
return;
}

@@ -1552,12 +1555,12 @@ static void selto_run(struct Scsi_Host *shpnt)
CURRENT_SC->SCp.phase &= ~selecting;

if (CURRENT_SC->SCp.phase & aborted)
done(shpnt, DID_ABORT << 16);
done(shpnt, SAM_STAT_GOOD, DID_ABORT);
else if (TESTLO(SSTAT0, SELINGO))
done(shpnt, DID_BUS_BUSY << 16);
done(shpnt, SAM_STAT_GOOD, DID_BUS_BUSY);
else
/* ARBITRATION won, but SELECTION failed */
done(shpnt, DID_NO_CONNECT << 16);
done(shpnt, SAM_STAT_GOOD, DID_NO_CONNECT);
}

/*

@@ -1891,7 +1894,7 @@ static void cmd_init(struct Scsi_Host *shpnt)
if (CURRENT_SC->SCp.sent_command) {
scmd_printk(KERN_ERR, CURRENT_SC,
"command already sent\n");
done(shpnt, DID_ERROR << 16);
done(shpnt, SAM_STAT_GOOD, DID_ERROR);
return;
}

@@ -2231,7 +2234,7 @@ static int update_state(struct Scsi_Host *shpnt)
static void parerr_run(struct Scsi_Host *shpnt)
{
scmd_printk(KERN_ERR, CURRENT_SC, "parity error\n");
done(shpnt, DID_PARITY << 16);
done(shpnt, SAM_STAT_GOOD, DID_PARITY);
}

/*

@@ -2254,7 +2257,7 @@ static void rsti_run(struct Scsi_Host *shpnt)
kfree(ptr->host_scribble);
ptr->host_scribble=NULL;

ptr->result = DID_RESET << 16;
set_host_byte(ptr, DID_RESET);
ptr->scsi_done(ptr);
}

@@ -2262,7 +2265,7 @@ static void rsti_run(struct Scsi_Host *shpnt)
}

if(CURRENT_SC && !CURRENT_SC->device->soft_reset)
done(shpnt, DID_RESET << 16 );
done(shpnt, SAM_STAT_GOOD, DID_RESET);
}
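The aha152x rework replaces the hand-packed result word with per-byte accessors; passing SAM_STAT_GOOD where the old code passed only a DID_* value makes the previously implicit zero status byte explicit. Roughly, the two calling conventions compare as follows (a sketch with hypothetical parameters, not the driver source):

#include <scsi/scsi_cmnd.h>

static void complete_old(struct scsi_cmnd *cmd, unsigned char status,
			 unsigned char msg, int did)
{
	/* removed style: status | message | host byte packed into one int */
	cmd->result = status | (msg << 8) | (did << 16);
}

static void complete_new(struct scsi_cmnd *cmd, unsigned char status, int did)
{
	set_status_byte(cmd, status);	/* SAM status byte, unshifted */
	set_host_byte(cmd, did);	/* DID_* host byte */
	/* the message byte is simply dropped: the midlayer no longer reads it */
}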
@@ -267,8 +267,11 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
guarantee that we will still have it in the
cdb when we come back */
if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) {
memcpy(SCtmp->sense_buffer, ecbptr->sense,
SCSI_SENSE_BUFFERSIZE);
memcpy_and_pad(SCtmp->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
ecbptr->sense,
sizeof(ecbptr->sense),
0);
errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status);
} else
errstatus = 0;
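memcpy_and_pad() from <linux/string.h> copies at most src_len bytes and fills any remainder of the destination with the pad byte, which is exactly what fixes the over-read: the old memcpy pulled SCSI_SENSE_BUFFERSIZE bytes out of a smaller adapter-provided sense array. A small usage sketch with hypothetical buffer sizes:

#include <linux/string.h>
#include <linux/types.h>

static void copy_sense_sketch(u8 *dst, const u8 *hw_sense)
{
	/* dst is 96 bytes (SCSI_SENSE_BUFFERSIZE), hw_sense only 14:
	 * copy the 14 valid bytes, zero-fill the remaining 82 */
	memcpy_and_pad(dst, 96, hw_sense, 14, 0);
}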
@@ -1928,7 +1928,7 @@ ahd_linux_handle_scsi_status(struct ahd_softc *ahd,
memcpy(cmd->sense_buffer,
ahd_get_sense_buf(ahd, scb)
+ sense_offset, sense_size);
cmd->result |= (DRIVER_SENSE << 24);
set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);

#ifdef AHD_DEBUG
if (ahd_debug & AHD_SHOW_SENSE) {

@@ -2018,6 +2018,7 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
int new_status = DID_OK;
int do_fallback = 0;
int scsi_status;
struct scsi_sense_data *sense;

/*
* Map CAM error codes into Linux Error codes. We

@@ -2041,18 +2042,12 @@ ahd_linux_queue_cmd_complete(struct ahd_softc *ahd, struct scsi_cmnd *cmd)
switch(scsi_status) {
case SAM_STAT_COMMAND_TERMINATED:
case SAM_STAT_CHECK_CONDITION:
if ((cmd->result >> 24) != DRIVER_SENSE) {
sense = (struct scsi_sense_data *)
cmd->sense_buffer;
if (sense->extra_len >= 5 &&
(sense->add_sense_code == 0x47
|| sense->add_sense_code == 0x48))
do_fallback = 1;
} else {
struct scsi_sense_data *sense;

sense = (struct scsi_sense_data *)
cmd->sense_buffer;
if (sense->extra_len >= 5 &&
(sense->add_sense_code == 0x47
|| sense->add_sense_code == 0x48))
do_fallback = 1;
}
break;
default:
break;
@@ -1838,7 +1838,6 @@ ahc_linux_handle_scsi_status(struct ahc_softc *ahc,
if (sense_size < SCSI_SENSE_BUFFERSIZE)
memset(&cmd->sense_buffer[sense_size], 0,
SCSI_SENSE_BUFFERSIZE - sense_size);
cmd->result |= (DRIVER_SENSE << 24);
#ifdef AHC_DEBUG
if (ahc_debug & AHC_SHOW_SENSE) {
int i;
@@ -205,7 +205,7 @@ static void asd_task_tasklet_complete(struct asd_ascb *ascb,
switch (opcode) {
case TC_NO_ERROR:
ts->resp = SAS_TASK_COMPLETE;
ts->stat = SAM_STAT_GOOD;
ts->stat = SAS_SAM_STAT_GOOD;
break;
case TC_UNDERRUN:
ts->resp = SAS_TASK_COMPLETE;
@@ -49,7 +49,7 @@ struct device_attribute;
#define ARCMSR_MAX_OUTSTANDING_CMD 1024
#define ARCMSR_DEFAULT_OUTSTANDING_CMD 128
#define ARCMSR_MIN_OUTSTANDING_CMD 32
#define ARCMSR_DRIVER_VERSION "v1.50.00.02-20200819"
#define ARCMSR_DRIVER_VERSION "v1.50.00.05-20210429"
#define ARCMSR_SCSI_INITIATOR_ID 255
#define ARCMSR_MAX_XFER_SECTORS 512
#define ARCMSR_MAX_XFER_SECTORS_B 4096
@@ -1323,19 +1323,21 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)

static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{

struct scsi_cmnd *pcmd = ccb->pcmd;
struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
pcmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
if (sensebuffer) {
int sense_data_length =
sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);

pcmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
if (pcmd->sense_buffer) {
struct SENSE_DATA *sensebuffer;

memcpy_and_pad(pcmd->sense_buffer,
SCSI_SENSE_BUFFERSIZE,
ccb->arcmsr_cdb.SenseData,
sizeof(ccb->arcmsr_cdb.SenseData),
0);

sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
sensebuffer->Valid = 1;
pcmd->result |= (DRIVER_SENSE << 24);
}
}

@@ -1923,8 +1925,12 @@ static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandContr

if (ccb->arc_cdb_size <= 0x300)
arc_cdb_size = (ccb->arc_cdb_size - 1) >> 6 | 1;
else
arc_cdb_size = (((ccb->arc_cdb_size + 0xff) >> 8) + 2) << 1 | 1;
else {
arc_cdb_size = ((ccb->arc_cdb_size + 0xff) >> 8) + 2;
if (arc_cdb_size > 0xF)
arc_cdb_size = 0xF;
arc_cdb_size = (arc_cdb_size << 1) | 1;
}
ccb_post_stamp = (ccb->smid | arc_cdb_size);
writel(0, &pmu->inbound_queueport_high);
writel(ccb_post_stamp, &pmu->inbound_queueport_low);

@@ -2415,10 +2421,17 @@ static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)

static void arcmsr_hbaE_doorbell_isr(struct AdapterControlBlock *pACB)
{
uint32_t outbound_doorbell, in_doorbell, tmp;
uint32_t outbound_doorbell, in_doorbell, tmp, i;
struct MessageUnit_E __iomem *reg = pACB->pmuE;

in_doorbell = readl(&reg->iobound_doorbell);
if (pACB->adapter_type == ACB_ADAPTER_TYPE_F) {
for (i = 0; i < 5; i++) {
in_doorbell = readl(&reg->iobound_doorbell);
if (in_doorbell != 0)
break;
}
} else
in_doorbell = readl(&reg->iobound_doorbell);
outbound_doorbell = in_doorbell ^ pACB->in_doorbell;
do {
writel(0, &reg->host_int_status); /* clear interrupt */

@@ -3243,7 +3256,7 @@ static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
if (!ccb)
return SCSI_MLQUEUE_HOST_BUSY;
if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
cmd->result = (DID_ERROR << 16) | SAM_STAT_RESERVATION_CONFLICT;
cmd->scsi_done(cmd);
return 0;
}
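In the arcmsr posting path the low bits of the queue-port word encode the CDB block length; the fix clamps the encoded value so it cannot overflow its 4-bit field. A sketch of the new arithmetic as a standalone helper (name invented):

#include <linux/types.h>

static u32 encode_cdb_size(u32 bytes)
{
	u32 v;

	if (bytes <= 0x300)
		return ((bytes - 1) >> 6) | 1;	/* small CDBs, 64-byte units */
	v = ((bytes + 0xff) >> 8) + 2;		/* round up to 256-byte units */
	if (v > 0xF)
		v = 0xF;			/* saturate the 4-bit field */
	return (v << 1) | 1;
}

/* worked example: bytes = 0x400 gives ((0x4ff >> 8) + 2) = 6,
 * under the 0xF cap, so the encoded value is (6 << 1) | 1 = 0x0d */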
@@ -794,7 +794,10 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,

acornscsi_dma_cleanup(host);

SCpnt->result = result << 16 | host->scsi.SCp.Message << 8 | host->scsi.SCp.Status;
set_host_byte(SCpnt, result);
if (result == DID_OK)
scsi_msg_to_host_byte(SCpnt, host->scsi.SCp.Message);
set_status_byte(SCpnt, host->scsi.SCp.Status);

/*
* In theory, this should not happen. In practice, it seems to.

@@ -833,12 +836,12 @@ static void acornscsi_done(AS_Host *host, struct scsi_cmnd **SCpntp,
xfer_warn = 0;

if (xfer_warn) {
switch (status_byte(SCpnt->result)) {
case CHECK_CONDITION:
case COMMAND_TERMINATED:
case BUSY:
case QUEUE_FULL:
case RESERVATION_CONFLICT:
switch (get_status_byte(SCpnt)) {
case SAM_STAT_CHECK_CONDITION:
case SAM_STAT_COMMAND_TERMINATED:
case SAM_STAT_BUSY:
case SAM_STAT_TASK_SET_FULL:
case SAM_STAT_RESERVATION_CONFLICT:
break;

default:

@@ -2470,7 +2473,7 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
if (acornscsi_cmdtype(SCpnt->cmnd[0]) == CMD_WRITE && (NO_WRITE & (1 << SCpnt->device->id))) {
printk(KERN_CRIT "scsi%d.%c: WRITE attempted with NO_WRITE flag set\n",
host->host->host_no, '0' + SCpnt->device->id);
SCpnt->result = DID_NO_CONNECT << 16;
set_host_byte(SCpnt, DID_NO_CONNECT);
done(SCpnt);
return 0;
}

@@ -2492,7 +2495,7 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,
unsigned long flags;

if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) {
SCpnt->result = DID_ERROR << 16;
set_host_byte(SCpnt, DID_ERROR);
done(SCpnt);
return 0;
}

@@ -2506,31 +2509,6 @@ static int acornscsi_queuecmd_lck(struct scsi_cmnd *SCpnt,

DEF_SCSI_QCMD(acornscsi_queuecmd)

/*
* Prototype: void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1, struct scsi_cmnd **SCpntp2, int result)
* Purpose : pass a result to *SCpntp1, and check if *SCpntp1 = *SCpntp2
* Params : SCpntp1 - pointer to command to return
* SCpntp2 - pointer to command to check
* result - result to pass back to mid-level done function
* Returns : *SCpntp2 = NULL if *SCpntp1 is the same command structure as *SCpntp2.
*/
static inline void acornscsi_reportstatus(struct scsi_cmnd **SCpntp1,
struct scsi_cmnd **SCpntp2,
int result)
{
struct scsi_cmnd *SCpnt = *SCpntp1;

if (SCpnt) {
*SCpntp1 = NULL;

SCpnt->result = result;
SCpnt->scsi_done(SCpnt);
}

if (SCpnt == *SCpntp2)
*SCpntp2 = NULL;
}

enum res_abort { res_not_running, res_success, res_success_clear, res_snooze };

/*
@@ -1479,7 +1479,7 @@ static void fas216_busservice_intr(FAS216_Info *info, unsigned int stat, unsigne

if (msgqueue_msglength(&info->scsi.msgs) > 1)
fas216_cmd(info, CMD_SETATN);
/*FALLTHROUGH*/
fallthrough;

/*
* Any -> Message Out

@@ -2042,8 +2042,10 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
{
info->stats.fins += 1;

SCpnt->result = result << 16 | info->scsi.SCp.Message << 8 |
info->scsi.SCp.Status;
set_host_byte(SCpnt, result);
if (result == DID_OK)
scsi_msg_to_host_byte(SCpnt, info->scsi.SCp.Message);
set_status_byte(SCpnt, info->scsi.SCp.Status);

fas216_log_command(info, LOG_CONNECT, SCpnt,
"command complete, result=0x%08x", SCpnt->result);

@@ -2051,23 +2053,22 @@ fas216_std_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, unsigned int result)
/*
* If the driver detected an error, we're all done.
*/
if (host_byte(SCpnt->result) != DID_OK ||
msg_byte(SCpnt->result) != COMMAND_COMPLETE)
if (get_host_byte(SCpnt) != DID_OK)
goto done;

/*
* If the command returned CHECK_CONDITION or COMMAND_TERMINATED
* status, request the sense information.
*/
if (status_byte(SCpnt->result) == CHECK_CONDITION ||
status_byte(SCpnt->result) == COMMAND_TERMINATED)
if (get_status_byte(SCpnt) == SAM_STAT_CHECK_CONDITION ||
get_status_byte(SCpnt) == SAM_STAT_COMMAND_TERMINATED)
goto request_sense;

/*
* If the command did not complete with GOOD status,
* we are all done here.
*/
if (status_byte(SCpnt->result) != GOOD)
if (get_status_byte(SCpnt) != SAM_STAT_GOOD)
goto done;

/*
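Both ARM drivers above switch to scsi_msg_to_host_byte(), a helper added by this series: instead of parking the raw message byte in cmd->result (which the midlayer no longer inspects), a message other than COMMAND COMPLETE is folded into the host byte. A rough sketch of the idea, simplified from the real helper in include/scsi/scsi_cmnd.h:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>

static inline void msg_to_host_byte_sketch(struct scsi_cmnd *cmd, u8 msg)
{
	if (msg == COMMAND_COMPLETE)
		return;			/* normal completion, host byte stays */
	set_host_byte(cmd, DID_ERROR);	/* any other message means trouble */
}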
@@ -182,6 +182,7 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
struct beiscsi_endpoint *beiscsi_ep;
struct iscsi_endpoint *ep;
uint16_t cri_index;
int rc = 0;

ep = iscsi_lookup_endpoint(transport_fd);
if (!ep)

@@ -189,15 +190,17 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,

beiscsi_ep = ep->dd_data;

if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
rc = -EINVAL;
goto put_ep;
}

if (beiscsi_ep->phba != phba) {
beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
"BS_%d : beiscsi_ep->hba=%p not equal to phba=%p\n",
beiscsi_ep->phba, phba);

return -EEXIST;
rc = -EEXIST;
goto put_ep;
}
cri_index = BE_GET_CRI_FROM_CID(beiscsi_ep->ep_cid);
if (phba->conn_table[cri_index]) {

@@ -209,7 +212,8 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
beiscsi_ep->ep_cid,
beiscsi_conn,
phba->conn_table[cri_index]);
return -EINVAL;
rc = -EINVAL;
goto put_ep;
}
}

@@ -226,7 +230,10 @@ int beiscsi_conn_bind(struct iscsi_cls_session *cls_session,
"BS_%d : cid %d phba->conn_table[%u]=%p\n",
beiscsi_ep->ep_cid, cri_index, beiscsi_conn);
phba->conn_table[cri_index] = beiscsi_conn;
return 0;

put_ep:
iscsi_put_endpoint(ep);
return rc;
}

static int beiscsi_iface_create_ipv4(struct beiscsi_hba *phba)

@@ -1293,7 +1300,6 @@ static int beiscsi_conn_close(struct beiscsi_endpoint *beiscsi_ep)
void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
{
struct beiscsi_endpoint *beiscsi_ep;
struct beiscsi_conn *beiscsi_conn;
struct beiscsi_hba *phba;
uint16_t cri_index;

@@ -1312,11 +1318,6 @@ void beiscsi_ep_disconnect(struct iscsi_endpoint *ep)
return;
}

if (beiscsi_ep->conn) {
beiscsi_conn = beiscsi_ep->conn;
iscsi_suspend_queue(beiscsi_conn->conn);
}

if (!beiscsi_hba_is_online(phba)) {
beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
"BS_%d : HBA in error 0x%lx\n", phba->state);
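The be_iscsi change follows a pattern repeated in bnx2i and cxgbi below: iscsi_lookup_endpoint() now returns a referenced endpoint, so every exit path of bind_conn() must drop that reference with iscsi_put_endpoint() instead of returning directly. The shape of the fix, reduced to a pseudo-driver sketch (names and the failing check are invented):

static bool some_check_fails(void)
{
	return false;	/* stand-in for the driver's real validation */
}

static int example_bind_conn(u64 transport_fd)
{
	struct iscsi_endpoint *ep;
	int rc = 0;

	ep = iscsi_lookup_endpoint(transport_fd);	/* takes a reference */
	if (!ep)
		return -EINVAL;

	if (some_check_fails()) {
		rc = -EINVAL;
		goto put_ep;	/* was: return -EINVAL, which leaked the ref */
	}
	/* ... bind work ... */
put_ep:
	iscsi_put_endpoint(ep);	/* drop the lookup reference on all paths */
	return rc;
}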
@@ -416,7 +416,7 @@ static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
return NULL;
}
shost->max_id = BE2_MAX_SESSIONS;
shost->max_id = BE2_MAX_SESSIONS - 1;
shost->max_channel = 0;
shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
shost->max_lun = BEISCSI_NUM_MAX_LUN;

@@ -3858,8 +3858,6 @@ static void beiscsi_free_mem(struct beiscsi_hba *phba)
int i, j;

mem_descr = phba->init_mem;
i = 0;
j = 0;
for (i = 0; i < SE_MEM_MAX; i++) {
for (j = mem_descr->num_elements; j > 0; j--) {
dma_free_coherent(&phba->pcidev->dev,

@@ -5318,7 +5316,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba)
/* Re-enable UER. If different TPE occurs then it is recoverable. */
beiscsi_set_uer_feature(phba);

phba->shost->max_id = phba->params.cxns_per_ctrl;
phba->shost->max_id = phba->params.cxns_per_ctrl - 1;
phba->shost->can_queue = phba->params.ios_per_ctrl;
ret = beiscsi_init_port(phba);
if (ret < 0) {

@@ -5809,6 +5807,7 @@ struct iscsi_transport beiscsi_iscsi_transport = {
.destroy_session = beiscsi_session_destroy,
.create_conn = beiscsi_conn_create,
.bind_conn = beiscsi_conn_bind,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_conn_teardown,
.attr_is_visible = beiscsi_attr_is_visible,
.set_iface_param = beiscsi_iface_set_param,
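The max_id assignments here and in bnx2i/cxgbi below fix a common off-by-one: shost->max_id is treated as the highest addressable target id, inclusive, so a host supporting N sessions must expose ids 0..N-1. In sketch form (nr_sessions is a placeholder parameter):

static void set_target_id_range(struct Scsi_Host *shost,
				unsigned int nr_sessions)
{
	/* max_id is the highest usable target id, inclusive, so setting it
	 * to nr_sessions allowed one session more than intended */
	shost->max_id = nr_sessions - 1;	/* ids 0 .. nr_sessions-1 */
}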
@@ -871,7 +871,7 @@ enum bfa_port_linkstate_rsn {

/*
* Initially flash content may be fff. On making LUN mask enable and disable
* state chnage. when report lun command is being processed it goes from
* state change. when report lun command is being processed it goes from
* BFA_LUN_MASK_ACTIVE to BFA_LUN_MASK_FETCH and comes back to
* BFA_LUN_MASK_ACTIVE.
*/
@@ -369,13 +369,10 @@ bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event,
u16 misc, struct fchs_s *fchdr)
{
struct bfa_plog_rec_s lp;
u32 *tmp_int = (u32 *) fchdr;
u32 ints[BFA_PL_INT_LOG_SZ];

if (plog->plog_enabled) {
memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
ints[2] = tmp_int[4];

@@ -389,13 +386,10 @@ bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
u32 pld_w0)
{
struct bfa_plog_rec_s lp;
u32 *tmp_int = (u32 *) fchdr;
u32 ints[BFA_PL_INT_LOG_SZ];

if (plog->plog_enabled) {
memset(&lp, 0, sizeof(struct bfa_plog_rec_s));

ints[0] = tmp_int[0];
ints[1] = tmp_int[1];
ints[2] = tmp_int[4];

@@ -3173,7 +3167,7 @@ bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
m->port_cfg = fcport->cfg;
m->msgtag = fcport->msgtag;
m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
m->use_flash_cfg = fcport->use_flash_cfg;
m->use_flash_cfg = fcport->use_flash_cfg;
bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
@@ -791,7 +791,7 @@ struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
return NULL;
shost->dma_boundary = cnic->pcidev->dma_mask;
shost->transportt = bnx2i_scsi_xport_template;
shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
shost->max_id = ISCSI_MAX_CONNS_PER_HBA - 1;
shost->max_channel = 0;
shost->max_lun = 512;
shost->max_cmd_len = 16;

@@ -1420,17 +1420,23 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
* Forcefully terminate all in progress connection recovery at the
* earliest, either in bind(), send_pdu(LOGIN), or conn_start()
*/
if (bnx2i_adapter_ready(hba))
return -EIO;
if (bnx2i_adapter_ready(hba)) {
ret_code = -EIO;
goto put_ep;
}

bnx2i_ep = ep->dd_data;
if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
(bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
(bnx2i_ep->state == EP_STATE_TCP_RST_RCVD)) {
/* Peer disconnect via' FIN or RST */
return -EINVAL;
ret_code = -EINVAL;
goto put_ep;
}

if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
return -EINVAL;
if (iscsi_conn_bind(cls_session, cls_conn, is_leading)) {
ret_code = -EINVAL;
goto put_ep;
}

if (bnx2i_ep->hba != hba) {
/* Error - TCP connection does not belong to this device

@@ -1441,7 +1447,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
"belong to hba (%s)\n",
hba->netdev->name);
return -EEXIST;
ret_code = -EEXIST;
goto put_ep;
}
bnx2i_ep->conn = bnx2i_conn;
bnx2i_conn->ep = bnx2i_ep;

@@ -1458,6 +1465,8 @@ static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
bnx2i_put_rq_buf(bnx2i_conn, 0);

bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
put_ep:
iscsi_put_endpoint(ep);
return ret_code;
}

@@ -2113,7 +2122,6 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
{
struct bnx2i_endpoint *bnx2i_ep;
struct bnx2i_conn *bnx2i_conn = NULL;
struct iscsi_conn *conn = NULL;
struct bnx2i_hba *hba;

bnx2i_ep = ep->dd_data;

@@ -2126,11 +2134,8 @@ static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
!time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
msleep(250);

if (bnx2i_ep->conn) {
if (bnx2i_ep->conn)
bnx2i_conn = bnx2i_ep->conn;
conn = bnx2i_conn->cls_conn->dd_data;
iscsi_suspend_queue(conn);
}
hba = bnx2i_ep->hba;

mutex_lock(&hba->net_dev_lock);

@@ -2276,6 +2281,7 @@ struct iscsi_transport bnx2i_iscsi_transport = {
.destroy_session = bnx2i_session_destroy,
.create_conn = bnx2i_conn_create,
.bind_conn = bnx2i_conn_bind,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = bnx2i_conn_destroy,
.attr_is_visible = bnx2i_attr_is_visible,
.set_param = iscsi_set_param,
@@ -198,8 +198,9 @@ ch_do_scsi(scsi_changer *ch, unsigned char *cmd, int cmd_len,
result = scsi_execute_req(ch->device, cmd, direction, buffer,
buflength, &sshdr, timeout * HZ,
MAX_RETRIES, NULL);

if (driver_byte(result) == DRIVER_SENSE) {
if (result < 0)
return result;
if (scsi_sense_valid(&sshdr)) {
if (debug)
scsi_print_sense_hdr(ch->device, ch->name, &sshdr);
errno = ch_find_errno(&sshdr);
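With the driver byte gone, callers of scsi_execute_req() can no longer test driver_byte(result) == DRIVER_SENSE: a negative return now signals a submission or transport failure, and sense-data validity is checked directly from the parsed header. The new calling pattern, sketched with hypothetical arguments:

static int issue_req_sketch(struct scsi_device *sdev,
			    const unsigned char *cmd,
			    void *buf, unsigned int len)
{
	struct scsi_sense_hdr sshdr;
	int result;

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
				  &sshdr, 30 * HZ, 3, NULL);
	if (result < 0)
		return result;		/* submission/transport failure */
	if (scsi_sense_valid(&sshdr)) {
		/* decode sshdr.sense_key / asc / ascq as needed */
	}
	return 0;
}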
@@ -406,14 +406,10 @@ static const char * const hostbyte_table[]={
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST", "DID_TARGET_FAILURE",
"DID_NEXUS_FAILURE", "DID_ALLOC_FAILURE", "DID_MEDIUM_ERROR" };

static const char * const driverbyte_table[]={
"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT", "DRIVER_MEDIA", "DRIVER_ERROR",
"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};

const char *scsi_hostbyte_string(int result)
{
enum scsi_host_status hb = host_byte(result);
const char *hb_string = NULL;
int hb = host_byte(result);

if (hb < ARRAY_SIZE(hostbyte_table))
hb_string = hostbyte_table[hb];

@@ -421,17 +417,6 @@ const char *scsi_hostbyte_string(int result)
}
EXPORT_SYMBOL(scsi_hostbyte_string);

const char *scsi_driverbyte_string(int result)
{
const char *db_string = NULL;
int db = driver_byte(result);

if (db < ARRAY_SIZE(driverbyte_table))
db_string = driverbyte_table[db];
return db_string;
}
EXPORT_SYMBOL(scsi_driverbyte_string);

#define scsi_mlreturn_name(result) { result, #result }
static const struct value_name_pair scsi_mlreturn_arr[] = {
scsi_mlreturn_name(NEEDS_RETRY),
@@ -117,6 +117,7 @@ static struct iscsi_transport cxgb3i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
@@ -134,6 +134,7 @@ static struct iscsi_transport cxgb4i_iscsi_transport = {
/* connection management */
.create_conn = cxgbi_create_conn,
.bind_conn = cxgbi_bind_conn,
.unbind_conn = iscsi_conn_unbind,
.destroy_conn = iscsi_tcp_conn_teardown,
.start_conn = iscsi_conn_start,
.stop_conn = iscsi_conn_stop,
@@ -337,7 +337,7 @@ void cxgbi_hbas_remove(struct cxgbi_device *cdev)
EXPORT_SYMBOL_GPL(cxgbi_hbas_remove);

int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,
unsigned int max_id, struct scsi_host_template *sht,
unsigned int max_conns, struct scsi_host_template *sht,
struct scsi_transport_template *stt)
{
struct cxgbi_hba *chba;

@@ -357,7 +357,7 @@ int cxgbi_hbas_add(struct cxgbi_device *cdev, u64 max_lun,

shost->transportt = stt;
shost->max_lun = max_lun;
shost->max_id = max_id;
shost->max_id = max_conns - 1;
shost->max_channel = 0;
shost->max_cmd_len = SCSI_MAX_VARLEN_CDB_SIZE;

@@ -2690,11 +2690,13 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
err = csk->cdev->csk_ddp_setup_pgidx(csk, csk->tid,
ppm->tformat.pgsz_idx_dflt);
if (err < 0)
return err;
goto put_ep;

err = iscsi_conn_bind(cls_session, cls_conn, is_leading);
if (err)
return -EINVAL;
if (err) {
err = -EINVAL;
goto put_ep;
}

/* calculate the tag idx bits needed for this conn based on cmds_max */
cconn->task_idx_bits = (__ilog2_u32(conn->session->cmds_max - 1)) + 1;

@@ -2715,7 +2717,9 @@ int cxgbi_bind_conn(struct iscsi_cls_session *cls_session,
/* init recv engine */
iscsi_tcp_hdr_recv_prep(tcp_conn);

return 0;
put_ep:
iscsi_put_endpoint(ep);
return err;
}
EXPORT_SYMBOL_GPL(cxgbi_bind_conn);

@@ -2968,7 +2972,6 @@ void cxgbi_ep_disconnect(struct iscsi_endpoint *ep)
ep, cep, cconn, csk, csk->state, csk->flags);

if (cconn && cconn->iconn) {
iscsi_suspend_tx(cconn->iconn);
write_lock_bh(&csk->callback_lock);
cep->csk->user_data = NULL;
cconn->cep = NULL;
@@ -369,8 +369,7 @@ static int read_cap16(struct scsi_device *sdev, struct llun_info *lli)
goto out;
}

if (driver_byte(result) == DRIVER_SENSE) {
result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */
if (result > 0 && scsi_sense_valid(&sshdr)) {
if (result & SAM_STAT_CHECK_CONDITION) {
switch (sshdr.sense_key) {
case NO_SENSE:
@@ -160,22 +160,6 @@
#define DC395x_write16(acb,address,value) outw((value), acb->io_port_base + (address))
#define DC395x_write32(acb,address,value) outl((value), acb->io_port_base + (address))

/* cmd->result */
#define RES_TARGET 0x000000FF /* Target State */
#define RES_TARGET_LNX STATUS_MASK /* Only official ... */
#define RES_ENDMSG 0x0000FF00 /* End Message */
#define RES_DID 0x00FF0000 /* DID_ codes */
#define RES_DRV 0xFF000000 /* DRIVER_ codes */

#define MK_RES(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt))
#define MK_RES_LNX(drv,did,msg,tgt) ((int)(drv)<<24 | (int)(did)<<16 | (int)(msg)<<8 | (int)(tgt)<<1)

#define SET_RES_TARGET(who,tgt) { who &= ~RES_TARGET; who |= (int)(tgt); }
#define SET_RES_TARGET_LNX(who,tgt) { who &= ~RES_TARGET_LNX; who |= (int)(tgt) << 1; }
#define SET_RES_MSG(who,msg) { who &= ~RES_ENDMSG; who |= (int)(msg) << 8; }
#define SET_RES_DID(who,did) { who &= ~RES_DID; who |= (int)(did) << 16; }
#define SET_RES_DRV(who,drv) { who &= ~RES_DRV; who |= (int)(drv) << 24; }

#define TAG_NONE 255

/*

@@ -986,7 +970,7 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s
cmd, cmd->device->id, (u8)cmd->device->lun, cmd->cmnd[0]);

/* Assume BAD_TARGET; will be cleared later */
cmd->result = DID_BAD_TARGET << 16;
set_host_byte(cmd, DID_BAD_TARGET);

/* ignore invalid targets */
if (cmd->device->id >= acb->scsi_host->max_id ||

@@ -1013,7 +997,8 @@ static int dc395x_queue_command_lck(struct scsi_cmnd *cmd, void (*done)(struct s

/* set callback and clear result in the command */
cmd->scsi_done = done;
cmd->result = 0;
set_host_byte(cmd, DID_OK);
set_status_byte(cmd, SAM_STAT_GOOD);

srb = list_first_entry_or_null(&acb->srb_free_list,
struct ScsiReqBlk, list);

@@ -1250,7 +1235,7 @@ static int dc395x_eh_abort(struct scsi_cmnd *cmd)
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
dprintkl(KERN_DEBUG, "eh_abort: Command was waiting\n");
cmd->result = DID_ABORT << 16;
set_host_byte(cmd, DID_ABORT);
return SUCCESS;
}
srb = find_cmd(cmd, &dcb->srb_going_list);

@@ -3178,6 +3163,8 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
srb, scsi_sg_count(cmd), srb->sg_index, srb->sg_count,
scsi_sgtalbe(cmd));
status = srb->target_status;
set_host_byte(cmd, DID_OK);
set_status_byte(cmd, SAM_STAT_GOOD);
if (srb->flag & AUTO_REQSENSE) {
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE1\n");
pci_unmap_srb_sense(acb, srb);

@@ -3186,7 +3173,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
*/
srb->flag &= ~AUTO_REQSENSE;
srb->adapter_status = 0;
srb->target_status = CHECK_CONDITION << 1;
srb->target_status = SAM_STAT_CHECK_CONDITION;
if (debug_enabled(DBG_1)) {
switch (cmd->sense_buffer[2] & 0x0f) {
case NOT_READY:

@@ -3233,22 +3220,13 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
*((unsigned int *)(cmd->sense_buffer + 3)));
}

if (status == (CHECK_CONDITION << 1)) {
cmd->result = DID_BAD_TARGET << 16;
if (status == SAM_STAT_CHECK_CONDITION) {
set_host_byte(cmd, DID_BAD_TARGET);
goto ckc_e;
}
dprintkdbg(DBG_0, "srb_done: AUTO_REQSENSE2\n");

if (srb->total_xfer_length
&& srb->total_xfer_length >= cmd->underflow)
cmd->result =
MK_RES_LNX(DRIVER_SENSE, DID_OK,
srb->end_message, CHECK_CONDITION);
/*SET_RES_DID(cmd->result,DID_OK) */
else
cmd->result =
MK_RES_LNX(DRIVER_SENSE, DID_OK,
srb->end_message, CHECK_CONDITION);
set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);

goto ckc_e;
}

@@ -3258,10 +3236,10 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,

/*
* target status..........................
*/
if (status >> 1 == CHECK_CONDITION) {
if (status == SAM_STAT_CHECK_CONDITION) {
request_sense(acb, dcb, srb);
return;
} else if (status >> 1 == QUEUE_FULL) {
} else if (status == SAM_STAT_TASK_SET_FULL) {
tempcnt = (u8)list_size(&dcb->srb_going_list);
dprintkl(KERN_INFO, "QUEUE_FULL for dev <%02i-%i> with %i cmnds\n",
dcb->target_id, dcb->target_lun, tempcnt);

@@ -3277,13 +3255,11 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
} else if (status == SCSI_STAT_SEL_TIMEOUT) {
srb->adapter_status = H_SEL_TIMEOUT;
srb->target_status = 0;
cmd->result = DID_NO_CONNECT << 16;
set_host_byte(cmd, DID_NO_CONNECT);
} else {
srb->adapter_status = 0;
SET_RES_DID(cmd->result, DID_ERROR);
SET_RES_MSG(cmd->result, srb->end_message);
SET_RES_TARGET(cmd->result, status);

set_host_byte(cmd, DID_ERROR);
set_status_byte(cmd, status);
}
} else {
/*

@@ -3292,16 +3268,13 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
status = srb->adapter_status;
if (status & H_OVER_UNDER_RUN) {
srb->target_status = 0;
SET_RES_DID(cmd->result, DID_OK);
SET_RES_MSG(cmd->result, srb->end_message);
scsi_msg_to_host_byte(cmd, srb->end_message);
} else if (srb->status & PARITY_ERROR) {
SET_RES_DID(cmd->result, DID_PARITY);
SET_RES_MSG(cmd->result, srb->end_message);
set_host_byte(cmd, DID_PARITY);
} else { /* No error */

srb->adapter_status = 0;
srb->target_status = 0;
SET_RES_DID(cmd->result, DID_OK);
}
}

@@ -3322,15 +3295,15 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
base = scsi_kmap_atomic_sg(sg, scsi_sg_count(cmd), &offset, &len);
ptr = (struct ScsiInqData *)(base + offset);

if (!ckc_only && (cmd->result & RES_DID) == 0
if (!ckc_only && get_host_byte(cmd) == DID_OK
&& cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
&& dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
dcb->inquiry7 = ptr->Flags;

/*if( srb->cmd->cmnd[0] == INQUIRY && */
/* (host_byte(cmd->result) == DID_OK || status_byte(cmd->result) & CHECK_CONDITION) ) */
if ((cmd->result == (DID_OK << 16) ||
status_byte(cmd->result) == CHECK_CONDITION)) {
if ((get_host_byte(cmd) == DID_OK) ||
(get_status_byte(cmd) == SAM_STAT_CHECK_CONDITION)) {
if (!dcb->init_tcq_flag) {
add_dev(acb, dcb, ptr);
dcb->init_tcq_flag = 1;

@@ -3357,7 +3330,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
if (srb != acb->tmp_srb) {
/* Add to free list */
dprintkdbg(DBG_0, "srb_done: (0x%p) done result=0x%08x\n",
cmd, cmd->result);
cmd, cmd->result);
list_move_tail(&srb->list, &acb->srb_free_list);
} else {
dprintkl(KERN_ERR, "srb_done: ERROR! Completed cmd with tmp_srb\n");

@@ -3381,16 +3354,14 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,
struct scsi_cmnd *p;

list_for_each_entry_safe(srb, tmp, &dcb->srb_going_list, list) {
int result;

p = srb->cmd;
result = MK_RES(0, did_flag, 0, 0);
printk("G:%p(%02i-%i) ", p,
p->device->id, (u8)p->device->lun);
list_del(&srb->list);
free_tag(dcb, srb);
list_add_tail(&srb->list, &acb->srb_free_list);
p->result = result;
set_host_byte(p, did_flag);
set_status_byte(p, SAM_STAT_GOOD);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
if (force) {

@@ -3411,14 +3382,13 @@ static void doing_srb_done(struct AdapterCtlBlk *acb, u8 did_flag,

/* Waiting queue */
list_for_each_entry_safe(srb, tmp, &dcb->srb_waiting_list, list) {
int result;
p = srb->cmd;

result = MK_RES(0, did_flag, 0, 0);
printk("W:%p<%02i-%i>", p, p->device->id,
(u8)p->device->lun);
list_move_tail(&srb->list, &acb->srb_free_list);
p->result = result;
set_host_byte(p, did_flag);
set_status_byte(p, SAM_STAT_GOOD);
pci_unmap_srb_sense(acb, srb);
pci_unmap_srb(acb, srb);
if (force) {
|
|
|
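The dc395x hunks above are typical of the status byte rework this series performs tree-wide: open-coded shifts into cmd->result and the driver-private MK_RES*/SET_RES_* macros give way to the scsi_cmnd accessors. A minimal before/after sketch of the pattern (example_complete_*() are hypothetical names, not functions from this diff):

static void example_complete_old(struct scsi_cmnd *cmd)
{
	/* old style: every caller must know the result word layout */
	cmd->result = DID_ERROR << 16 | SAM_STAT_CHECK_CONDITION;
}

static void example_complete_new(struct scsi_cmnd *cmd)
{
	/* new style: the helpers own the layout of the result word */
	set_host_byte(cmd, DID_ERROR);
	set_status_byte(cmd, SAM_STAT_CHECK_CONDITION);
}

set_host_byte() and set_status_byte() each update only their own byte of cmd->result, which is why srb_done() can preset DID_OK/SAM_STAT_GOOD early and let the error paths overwrite just the byte that actually changed.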
drivers/scsi/device_handler/scsi_dh_alua.c:

@@ -88,6 +88,7 @@ struct alua_dh_data {
	struct scsi_device	*sdev;
	int			init_error;
	struct mutex		init_mutex;
+	bool			disabled;
};

struct alua_queue_data {
@@ -517,7 +518,8 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
	int len, k, off, bufflen = ALUA_RTPG_SIZE;
	int group_id_old, state_old, pref_old, valid_states_old;
	unsigned char *desc, *buff;
-	unsigned err, retval;
+	unsigned err;
+	int retval;
	unsigned int tpg_desc_tbl_off;
	unsigned char orig_transition_tmo;
	unsigned long flags;
@@ -562,13 +564,15 @@ static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
			kfree(buff);
			return SCSI_DH_OK;
		}
-		if (!scsi_sense_valid(&sense_hdr)) {
+		if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: rtpg failed, result %d\n",
				    ALUA_DH_NAME, retval);
			kfree(buff);
-			if (driver_byte(retval) == DRIVER_ERROR)
+			if (retval < 0)
				return SCSI_DH_DEV_TEMP_BUSY;
+			if (host_byte(retval) == DID_NO_CONNECT)
+				return SCSI_DH_RES_TEMP_UNAVAIL;
			return SCSI_DH_IO;
		}

@@ -791,11 +795,11 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
	retval = submit_stpg(sdev, pg->group_id, &sense_hdr);

	if (retval) {
-		if (!scsi_sense_valid(&sense_hdr)) {
+		if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: stpg failed, result %d",
				    ALUA_DH_NAME, retval);
-			if (driver_byte(retval) == DRIVER_ERROR)
+			if (retval < 0)
				return SCSI_DH_DEV_TEMP_BUSY;
		} else {
			sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
@@ -807,6 +811,51 @@ static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
	return SCSI_DH_RETRY;
}

+static bool alua_rtpg_select_sdev(struct alua_port_group *pg)
+{
+	struct alua_dh_data *h;
+	struct scsi_device *sdev = NULL;
+
+	lockdep_assert_held(&pg->lock);
+	if (WARN_ON(!pg->rtpg_sdev))
+		return false;
+
+	/*
+	 * RCU protection isn't necessary for dh_list here
+	 * as we hold pg->lock, but for access to h->pg.
+	 */
+	rcu_read_lock();
+	list_for_each_entry_rcu(h, &pg->dh_list, node) {
+		if (!h->sdev)
+			continue;
+		if (h->sdev == pg->rtpg_sdev) {
+			h->disabled = true;
+			continue;
+		}
+		if (rcu_dereference(h->pg) == pg &&
+		    !h->disabled &&
+		    !scsi_device_get(h->sdev)) {
+			sdev = h->sdev;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	if (!sdev) {
+		pr_warn("%s: no device found for rtpg\n",
+			(pg->device_id_len ?
			 (char *)pg->device_id_str : "(nameless PG)"));
+		return false;
+	}
+
+	sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n");
+
+	scsi_device_put(pg->rtpg_sdev);
+	pg->rtpg_sdev = sdev;
+
+	return true;
+}
+
static void alua_rtpg_work(struct work_struct *work)
{
	struct alua_port_group *pg =
@@ -815,6 +864,7 @@ static void alua_rtpg_work(struct work_struct *work)
	LIST_HEAD(qdata_list);
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata, *tmp;
+	struct alua_dh_data *h;
	unsigned long flags;

	spin_lock_irqsave(&pg->lock, flags);
@@ -848,9 +898,18 @@ static void alua_rtpg_work(struct work_struct *work)
	}
	err = alua_rtpg(sdev, pg);
	spin_lock_irqsave(&pg->lock, flags);
-	if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
+
+	/* If RTPG failed on the current device, try using another */
+	if (err == SCSI_DH_RES_TEMP_UNAVAIL &&
+	    alua_rtpg_select_sdev(pg))
+		err = SCSI_DH_IMM_RETRY;
+
+	if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY ||
+	    pg->flags & ALUA_PG_RUN_RTPG) {
		pg->flags &= ~ALUA_PG_RUNNING;
-		if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
+		if (err == SCSI_DH_IMM_RETRY)
+			pg->interval = 0;
+		else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
			pg->interval = ALUA_RTPG_RETRY_DELAY;
		pg->flags |= ALUA_PG_RUN_RTPG;
		spin_unlock_irqrestore(&pg->lock, flags);
@@ -878,6 +937,12 @@ static void alua_rtpg_work(struct work_struct *work)
	}

	list_splice_init(&pg->rtpg_list, &qdata_list);
+	/*
+	 * We went through an RTPG, for good or bad.
+	 * Re-enable all devices for the next attempt.
+	 */
+	list_for_each_entry(h, &pg->dh_list, node)
+		h->disabled = false;
	pg->rtpg_sdev = NULL;
	spin_unlock_irqrestore(&pg->lock, flags);

@@ -962,6 +1027,7 @@ static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
	int err = SCSI_DH_DEV_UNSUPP, tpgs;

	mutex_lock(&h->init_mutex);
+	h->disabled = false;
	tpgs = alua_check_tpgs(sdev);
	if (tpgs != TPGS_MODE_NONE)
		err = alua_check_vpd(sdev, h, tpgs);
@@ -1080,7 +1146,6 @@ static void alua_check(struct scsi_device *sdev, bool force)
		return;
	}
	rcu_read_unlock();
-
	alua_rtpg_queue(pg, sdev, NULL, force);
	kref_put(&pg->kref, release_port_group);
}
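Two related changes meet in these alua hunks. First, retval becomes a plain int and is tested for a negative value: with the reworked result handling, submission helpers hand back a negative errno when the command never reached the device, so probing the now-removed driver byte (DRIVER_ERROR) is no longer meaningful. Second, an RTPG that fails with DID_NO_CONNECT now maps to SCSI_DH_RES_TEMP_UNAVAIL so alua_rtpg_work() can retry the RTPG on a different path via the new alua_rtpg_select_sdev(). A hedged sketch of the caller-side convention (example_classify() is illustrative, not a function from this diff):

static int example_classify(int retval, struct scsi_sense_hdr *sshdr)
{
	if (retval < 0)		/* -ENODEV, -ENOMEM, ...: never submitted */
		return SCSI_DH_DEV_TEMP_BUSY;
	if (host_byte(retval) == DID_NO_CONNECT)	/* this path is gone */
		return SCSI_DH_RES_TEMP_UNAVAIL;
	if (scsi_sense_valid(sshdr))	/* target answered; inspect sense data */
		return SCSI_DH_OK;
	return SCSI_DH_IO;	/* failed without usable sense data */
}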
drivers/scsi/elx/Kconfig (new file):

@@ -0,0 +1,9 @@
config SCSI_EFCT
	tristate "Emulex Fibre Channel Target"
	depends on PCI && SCSI
	depends on TARGET_CORE
	depends on SCSI_FC_ATTRS
	select CRC_T10DIF
	help
	  The efct driver provides enhanced SCSI Target Mode
	  support for specific SLI-4 adapters.
drivers/scsi/elx/Makefile (new file):

@@ -0,0 +1,18 @@
#// SPDX-License-Identifier: GPL-2.0
#/*
# * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
# * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
# */


obj-$(CONFIG_SCSI_EFCT) := efct.o

efct-objs := efct/efct_driver.o efct/efct_io.o efct/efct_scsi.o \
	     efct/efct_xport.o efct/efct_hw.o efct/efct_hw_queues.o \
	     efct/efct_lio.o efct/efct_unsol.o

efct-objs += libefc/efc_cmds.o libefc/efc_domain.o libefc/efc_fabric.o \
	     libefc/efc_node.o libefc/efc_nport.o libefc/efc_device.o \
	     libefc/efclib.o libefc/efc_sm.o libefc/efc_els.o

efct-objs += libefc_sli/sli4.o
drivers/scsi/elx/efct/efct_driver.c (new file):

@@ -0,0 +1,786 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"

#include "efct_hw.h"
#include "efct_unsol.h"
#include "efct_scsi.h"

LIST_HEAD(efct_devices);

static int logmask;
module_param(logmask, int, 0444);
MODULE_PARM_DESC(logmask, "logging bitmask (default 0)");

static struct libefc_function_template efct_libefc_templ = {
	.issue_mbox_rqst = efct_issue_mbox_rqst,
	.send_els = efct_els_hw_srrs_send,
	.send_bls = efct_efc_bls_send,

	.new_nport = efct_scsi_tgt_new_nport,
	.del_nport = efct_scsi_tgt_del_nport,
	.scsi_new_node = efct_scsi_new_initiator,
	.scsi_del_node = efct_scsi_del_initiator,
	.hw_seq_free = efct_efc_hw_sequence_free,
};

static int
efct_device_init(void)
{
	int rc;

	/* driver-wide init for target-server */
	rc = efct_scsi_tgt_driver_init();
	if (rc) {
		pr_err("efct_scsi_tgt_init failed rc=%d\n", rc);
		return rc;
	}

	rc = efct_scsi_reg_fc_transport();
	if (rc) {
		pr_err("failed to register to FC host\n");
		return rc;
	}

	return 0;
}

static void
efct_device_shutdown(void)
{
	efct_scsi_release_fc_transport();

	efct_scsi_tgt_driver_exit();
}

static void *
efct_device_alloc(u32 nid)
{
	struct efct *efct = NULL;

	efct = kzalloc_node(sizeof(*efct), GFP_KERNEL, nid);
	if (!efct)
		return efct;

	INIT_LIST_HEAD(&efct->list_entry);
	list_add_tail(&efct->list_entry, &efct_devices);

	return efct;
}

static void
efct_teardown_msix(struct efct *efct)
{
	u32 i;

	for (i = 0; i < efct->n_msix_vec; i++) {
		free_irq(pci_irq_vector(efct->pci, i),
			 &efct->intr_context[i]);
	}

	pci_free_irq_vectors(efct->pci);
}

static int
efct_efclib_config(struct efct *efct, struct libefc_function_template *tt)
{
	struct efc *efc;
	struct sli4 *sli;
	int rc = 0;

	efc = kzalloc(sizeof(*efc), GFP_KERNEL);
	if (!efc)
		return -ENOMEM;

	efct->efcport = efc;

	memcpy(&efc->tt, tt, sizeof(*tt));
	efc->base = efct;
	efc->pci = efct->pci;

	efc->def_wwnn = efct_get_wwnn(&efct->hw);
	efc->def_wwpn = efct_get_wwpn(&efct->hw);
	efc->enable_tgt = 1;
	efc->log_level = EFC_LOG_LIB;

	sli = &efct->hw.sli;
	efc->max_xfer_size = sli->sge_supported_length *
			     sli_get_max_sgl(&efct->hw.sli);
	efc->sli = sli;
	efc->fcfi = efct->hw.fcf_indicator;

	rc = efcport_init(efc);
	if (rc)
		efc_log_err(efc, "efcport_init failed\n");

	return rc;
}

static int efct_request_firmware_update(struct efct *efct);

static const char*
efct_pci_model(u16 device)
{
	switch (device) {
	case EFCT_DEVICE_LANCER_G6: return "LPE31004";
	case EFCT_DEVICE_LANCER_G7: return "LPE36000";
	default: return "unknown";
	}
}

static int
efct_device_attach(struct efct *efct)
{
	u32 rc = 0, i = 0;

	if (efct->attached) {
		efc_log_err(efct, "Device is already attached\n");
		return -EIO;
	}

	snprintf(efct->name, sizeof(efct->name), "[%s%d] ", "fc",
		 efct->instance_index);

	efct->logmask = logmask;
	efct->filter_def = EFCT_DEFAULT_FILTER;
	efct->max_isr_time_msec = EFCT_OS_MAX_ISR_TIME_MSEC;

	efct->model = efct_pci_model(efct->pci->device);

	efct->efct_req_fw_upgrade = true;

	/* Allocate transport object and bring online */
	efct->xport = efct_xport_alloc(efct);
	if (!efct->xport) {
		efc_log_err(efct, "failed to allocate transport object\n");
		rc = -ENOMEM;
		goto out;
	}

	rc = efct_xport_attach(efct->xport);
	if (rc) {
		efc_log_err(efct, "failed to attach transport object\n");
		goto xport_out;
	}

	rc = efct_xport_initialize(efct->xport);
	if (rc) {
		efc_log_err(efct, "failed to initialize transport object\n");
		goto xport_out;
	}

	rc = efct_efclib_config(efct, &efct_libefc_templ);
	if (rc) {
		efc_log_err(efct, "failed to init efclib\n");
		goto efclib_out;
	}

	for (i = 0; i < efct->n_msix_vec; i++) {
		efc_log_debug(efct, "irq %d enabled\n", i);
		enable_irq(pci_irq_vector(efct->pci, i));
	}

	efct->attached = true;

	if (efct->efct_req_fw_upgrade)
		efct_request_firmware_update(efct);

	return rc;

efclib_out:
	efct_xport_detach(efct->xport);
xport_out:
	efct_xport_free(efct->xport);
	efct->xport = NULL;
out:
	return rc;
}

static int
efct_device_detach(struct efct *efct)
{
	int i;

	if (!efct || !efct->attached) {
		pr_err("Device is not attached\n");
		return -EIO;
	}

	if (efct_xport_control(efct->xport, EFCT_XPORT_SHUTDOWN))
		efc_log_err(efct, "Transport Shutdown timed out\n");

	for (i = 0; i < efct->n_msix_vec; i++)
		disable_irq(pci_irq_vector(efct->pci, i));

	efct_xport_detach(efct->xport);

	efct_xport_free(efct->xport);
	efct->xport = NULL;

	efcport_destroy(efct->efcport);
	kfree(efct->efcport);

	efct->attached = false;

	return 0;
}

static void
efct_fw_write_cb(int status, u32 actual_write_length,
		 u32 change_status, void *arg)
{
	struct efct_fw_write_result *result = arg;

	result->status = status;
	result->actual_xfer = actual_write_length;
	result->change_status = change_status;

	complete(&result->done);
}

static int
efct_firmware_write(struct efct *efct, const u8 *buf, size_t buf_len,
		    u8 *change_status)
{
	int rc = 0;
	u32 bytes_left;
	u32 xfer_size;
	u32 offset;
	struct efc_dma dma;
	int last = 0;
	struct efct_fw_write_result result;

	init_completion(&result.done);

	bytes_left = buf_len;
	offset = 0;

	dma.size = FW_WRITE_BUFSIZE;
	dma.virt = dma_alloc_coherent(&efct->pci->dev,
				      dma.size, &dma.phys, GFP_DMA);
	if (!dma.virt)
		return -ENOMEM;

	while (bytes_left > 0) {
		if (bytes_left > FW_WRITE_BUFSIZE)
			xfer_size = FW_WRITE_BUFSIZE;
		else
			xfer_size = bytes_left;

		memcpy(dma.virt, buf + offset, xfer_size);

		if (bytes_left == xfer_size)
			last = 1;

		efct_hw_firmware_write(&efct->hw, &dma, xfer_size, offset,
				       last, efct_fw_write_cb, &result);

		if (wait_for_completion_interruptible(&result.done) != 0) {
			rc = -ENXIO;
			break;
		}

		if (result.actual_xfer == 0 || result.status != 0) {
			rc = -EFAULT;
			break;
		}

		if (last)
			*change_status = result.change_status;

		bytes_left -= result.actual_xfer;
		offset += result.actual_xfer;
	}

	dma_free_coherent(&efct->pci->dev, dma.size, dma.virt, dma.phys);
	return rc;
}
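efct_firmware_write() is a textbook example of turning an asynchronous HW operation into a synchronous one: each chunk is handed to efct_hw_firmware_write() together with a callback, and the submitter parks on a completion until that callback fires. A generic sketch of the idiom, independent of the efct structures (all demo_* names are hypothetical):

#include <linux/completion.h>

struct demo_result {
	struct completion done;
	int status;
};

static void demo_cb(int status, void *arg)
{
	struct demo_result *res = arg;

	res->status = status;
	complete(&res->done);		/* wake the submitting thread */
}

static int demo_submit_and_wait(void (*submit)(void (*cb)(int, void *), void *arg))
{
	struct demo_result res;

	init_completion(&res.done);
	submit(demo_cb, &res);		/* runs asynchronously, completes from IRQ/worker */
	if (wait_for_completion_interruptible(&res.done))
		return -ERESTARTSYS;	/* interrupted by a signal before completion */
	return res.status;
}

Note that the driver initializes the completion once and reuses it across chunks; this is safe because a completion carries a count and each wait consumes exactly one complete().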
static int
efct_fw_reset(struct efct *efct)
{
	/*
	 * Firmware reset to activate the new firmware.
	 * Function 0 will update and load the new firmware
	 * during attach.
	 */
	if (timer_pending(&efct->xport->stats_timer))
		del_timer(&efct->xport->stats_timer);

	if (efct_hw_reset(&efct->hw, EFCT_HW_RESET_FIRMWARE)) {
		efc_log_info(efct, "failed to reset firmware\n");
		return -EIO;
	}

	efc_log_info(efct, "successfully reset firmware. Now resetting port\n");

	efct_device_detach(efct);
	return efct_device_attach(efct);
}

static int
efct_request_firmware_update(struct efct *efct)
{
	int rc = 0;
	u8 file_name[256], fw_change_status = 0;
	const struct firmware *fw;
	struct efct_hw_grp_hdr *fw_image;

	snprintf(file_name, 256, "%s.grp", efct->model);

	rc = request_firmware(&fw, file_name, &efct->pci->dev);
	if (rc) {
		efc_log_debug(efct, "Firmware file(%s) not found.\n", file_name);
		return rc;
	}

	fw_image = (struct efct_hw_grp_hdr *)fw->data;

	if (!strncmp(efct->hw.sli.fw_name[0], fw_image->revision,
		     strnlen(fw_image->revision, 16))) {
		efc_log_debug(efct,
			      "Skip update. Firmware is already up to date.\n");
		goto exit;
	}

	efc_log_info(efct, "Firmware update is initiated. %s -> %s\n",
		     efct->hw.sli.fw_name[0], fw_image->revision);

	rc = efct_firmware_write(efct, fw->data, fw->size, &fw_change_status);
	if (rc) {
		efc_log_err(efct, "Firmware update failed. rc = %d\n", rc);
		goto exit;
	}

	efc_log_info(efct, "Firmware updated successfully\n");
	switch (fw_change_status) {
	case 0x00:
		efc_log_info(efct, "New firmware is active.\n");
		break;
	case 0x01:
		efc_log_info(efct,
			     "System reboot needed to activate the new firmware\n");
		break;
	case 0x02:
	case 0x03:
		efc_log_info(efct,
			     "firmware reset to activate the new firmware\n");
		efct_fw_reset(efct);
		break;
	default:
		efc_log_info(efct, "Unexpected value change_status:%d\n",
			     fw_change_status);
		break;
	}

exit:
	release_firmware(fw);

	return rc;
}

static void
efct_device_free(struct efct *efct)
{
	if (efct) {
		list_del(&efct->list_entry);
		kfree(efct);
	}
}

static int
efct_device_interrupts_required(struct efct *efct)
{
	int rc;

	rc = efct_hw_setup(&efct->hw, efct, efct->pci);
	if (rc < 0)
		return rc;

	return efct->hw.config.n_eq;
}

static irqreturn_t
efct_intr_thread(int irq, void *handle)
{
	struct efct_intr_context *intr_ctx = handle;
	struct efct *efct = intr_ctx->efct;

	efct_hw_process(&efct->hw, intr_ctx->index, efct->max_isr_time_msec);
	return IRQ_HANDLED;
}

static irqreturn_t
efct_intr_msix(int irq, void *handle)
{
	return IRQ_WAKE_THREAD;
}

static int
efct_setup_msix(struct efct *efct, u32 num_intrs)
{
	int rc = 0, i;

	if (!pci_find_capability(efct->pci, PCI_CAP_ID_MSIX)) {
		dev_err(&efct->pci->dev,
			"%s : MSI-X not available\n", __func__);
		return -EIO;
	}

	efct->n_msix_vec = num_intrs;

	rc = pci_alloc_irq_vectors(efct->pci, num_intrs, num_intrs,
				   PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);

	if (rc < 0) {
		dev_err(&efct->pci->dev, "Failed to alloc irq : %d\n", rc);
		return rc;
	}

	for (i = 0; i < num_intrs; i++) {
		struct efct_intr_context *intr_ctx = NULL;

		intr_ctx = &efct->intr_context[i];
		intr_ctx->efct = efct;
		intr_ctx->index = i;

		rc = request_threaded_irq(pci_irq_vector(efct->pci, i),
					  efct_intr_msix, efct_intr_thread, 0,
					  EFCT_DRIVER_NAME, intr_ctx);
		if (rc) {
			dev_err(&efct->pci->dev,
				"Failed to register %d vector: %d\n", i, rc);
			goto out;
		}
	}

	return rc;

out:
	while (--i >= 0)
		free_irq(pci_irq_vector(efct->pci, i),
			 &efct->intr_context[i]);

	pci_free_irq_vectors(efct->pci);
	return rc;
}

static struct pci_device_id efct_pci_table[] = {
	{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G6), 0},
	{PCI_DEVICE(EFCT_VENDOR_ID, EFCT_DEVICE_LANCER_G7), 0},
	{}	/* terminate list */
};

static int
efct_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct efct *efct = NULL;
	int rc;
	u32 i, r;
	int num_interrupts = 0;
	int nid;

	dev_info(&pdev->dev, "%s\n", EFCT_DRIVER_NAME);

	rc = pci_enable_device_mem(pdev);
	if (rc)
		return rc;

	pci_set_master(pdev);

	rc = pci_set_mwi(pdev);
	if (rc) {
		dev_info(&pdev->dev, "pci_set_mwi returned %d\n", rc);
		goto mwi_out;
	}

	rc = pci_request_regions(pdev, EFCT_DRIVER_NAME);
	if (rc) {
		dev_err(&pdev->dev, "pci_request_regions failed %d\n", rc);
		goto req_regions_out;
	}

	/* Fetch the Numa node id for this device */
	nid = dev_to_node(&pdev->dev);
	if (nid < 0) {
		dev_err(&pdev->dev, "Warning Numa node ID is %d\n", nid);
		nid = 0;
	}

	/* Allocate efct */
	efct = efct_device_alloc(nid);
	if (!efct) {
		dev_err(&pdev->dev, "Failed to allocate efct\n");
		rc = -ENOMEM;
		goto alloc_out;
	}

	efct->pci = pdev;
	efct->numa_node = nid;

	/* Map all memory BARs */
	for (i = 0, r = 0; i < EFCT_PCI_MAX_REGS; i++) {
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
			efct->reg[r] = ioremap(pci_resource_start(pdev, i),
					       pci_resource_len(pdev, i));
			r++;
		}

		/*
		 * If the 64-bit attribute is set, both this BAR and the
		 * next form the complete address. Skip processing the
		 * next BAR.
		 */
		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM_64)
			i++;
	}

	pci_set_drvdata(pdev, efct);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0 ||
	    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
		dev_warn(&pdev->dev, "trying DMA_BIT_MASK(32)\n");
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0 ||
		    pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			dev_err(&pdev->dev, "setting DMA_BIT_MASK failed\n");
			rc = -1;
			goto dma_mask_out;
		}
	}

	num_interrupts = efct_device_interrupts_required(efct);
	if (num_interrupts < 0) {
		efc_log_err(efct, "efct_device_interrupts_required failed\n");
		rc = -1;
		goto dma_mask_out;
	}

	/*
	 * Initialize MSIX interrupts, note,
	 * efct_setup_msix() enables the interrupt
	 */
	rc = efct_setup_msix(efct, num_interrupts);
	if (rc) {
		dev_err(&pdev->dev, "Can't setup msix\n");
		goto dma_mask_out;
	}
	/* Disable interrupt for now */
	for (i = 0; i < efct->n_msix_vec; i++) {
		efc_log_debug(efct, "irq %d disabled\n", i);
		disable_irq(pci_irq_vector(efct->pci, i));
	}

	rc = efct_device_attach(efct);
	if (rc)
		goto attach_out;

	return 0;

attach_out:
	efct_teardown_msix(efct);
dma_mask_out:
	pci_set_drvdata(pdev, NULL);

	for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
		if (efct->reg[i])
			iounmap(efct->reg[i]);
	}
	efct_device_free(efct);
alloc_out:
	pci_release_regions(pdev);
req_regions_out:
	pci_clear_mwi(pdev);
mwi_out:
	pci_disable_device(pdev);
	return rc;
}

static void
efct_pci_remove(struct pci_dev *pdev)
{
	struct efct *efct = pci_get_drvdata(pdev);
	u32 i;

	if (!efct)
		return;

	efct_device_detach(efct);

	efct_teardown_msix(efct);

	for (i = 0; i < EFCT_PCI_MAX_REGS; i++) {
		if (efct->reg[i])
			iounmap(efct->reg[i]);
	}

	pci_set_drvdata(pdev, NULL);

	efct_device_free(efct);

	pci_release_regions(pdev);

	pci_disable_device(pdev);
}

static void
efct_device_prep_for_reset(struct efct *efct, struct pci_dev *pdev)
{
	if (efct) {
		efc_log_debug(efct,
			      "PCI channel disable preparing for reset\n");
		efct_device_detach(efct);
		/* Disable interrupt and pci device */
		efct_teardown_msix(efct);
	}
	pci_disable_device(pdev);
}

static void
efct_device_prep_for_recover(struct efct *efct)
{
	if (efct) {
		efc_log_debug(efct, "PCI channel preparing for recovery\n");
		efct_hw_io_abort_all(&efct->hw);
	}
}

/**
 * efct_pci_io_error_detected - method for handling PCI I/O error
 * @pdev: pointer to PCI device.
 * @state: the current PCI connection state.
 *
 * This routine is registered to the PCI subsystem for error handling. This
 * function is called by the PCI subsystem after a PCI bus error affecting
 * this device has been detected. When this routine is invoked, it dispatches
 * the device error detected handling routine, which will perform the proper
 * error detected operation.
 *
 * Return codes
 * PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
 * PCI_ERS_RESULT_DISCONNECT - device could not be recovered
 */
static pci_ers_result_t
efct_pci_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
{
	struct efct *efct = pci_get_drvdata(pdev);
	pci_ers_result_t rc;

	switch (state) {
	case pci_channel_io_normal:
		efct_device_prep_for_recover(efct);
		rc = PCI_ERS_RESULT_CAN_RECOVER;
		break;
	case pci_channel_io_frozen:
		efct_device_prep_for_reset(efct, pdev);
		rc = PCI_ERS_RESULT_NEED_RESET;
		break;
	case pci_channel_io_perm_failure:
		efct_device_detach(efct);
		rc = PCI_ERS_RESULT_DISCONNECT;
		break;
	default:
		efc_log_debug(efct, "Unknown PCI error state:0x%x\n", state);
		efct_device_prep_for_reset(efct, pdev);
		rc = PCI_ERS_RESULT_NEED_RESET;
		break;
	}

	return rc;
}

static pci_ers_result_t
efct_pci_io_slot_reset(struct pci_dev *pdev)
{
	int rc;
	struct efct *efct = pci_get_drvdata(pdev);

	rc = pci_enable_device_mem(pdev);
	if (rc) {
		efc_log_err(efct, "failed to enable PCI device after reset\n");
		return PCI_ERS_RESULT_DISCONNECT;
	}

	/*
	 * As the new kernel behavior of pci_restore_state() API call clears
	 * the device saved_state flag, need to save the restored state again.
	 */

	pci_save_state(pdev);

	pci_set_master(pdev);

	rc = efct_setup_msix(efct, efct->n_msix_vec);
	if (rc)
		efc_log_err(efct, "rc %d returned, IRQ allocation failed\n",
			    rc);

	/* Perform device reset */
	efct_device_detach(efct);
	/* Bring device back online */
	efct_device_attach(efct);

	return PCI_ERS_RESULT_RECOVERED;
}

static void
efct_pci_io_resume(struct pci_dev *pdev)
{
	struct efct *efct = pci_get_drvdata(pdev);

	/* Perform device reset */
	efct_device_detach(efct);
	/* Bring device back online */
	efct_device_attach(efct);
}

MODULE_DEVICE_TABLE(pci, efct_pci_table);

static struct pci_error_handlers efct_pci_err_handler = {
	.error_detected = efct_pci_io_error_detected,
	.slot_reset = efct_pci_io_slot_reset,
	.resume = efct_pci_io_resume,
};

static struct pci_driver efct_pci_driver = {
	.name		= EFCT_DRIVER_NAME,
	.id_table	= efct_pci_table,
	.probe		= efct_pci_probe,
	.remove		= efct_pci_remove,
	.err_handler	= &efct_pci_err_handler,
};

static
int __init efct_init(void)
{
	int rc;

	rc = efct_device_init();
	if (rc) {
		pr_err("efct_device_init failed rc=%d\n", rc);
		return rc;
	}

	rc = pci_register_driver(&efct_pci_driver);
	if (rc) {
		pr_err("pci_register_driver failed rc=%d\n", rc);
		efct_device_shutdown();
	}

	return rc;
}

static void __exit efct_exit(void)
{
	pci_unregister_driver(&efct_pci_driver);
	efct_device_shutdown();
}

module_init(efct_init);
module_exit(efct_exit);
MODULE_VERSION(EFCT_DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Broadcom");
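The efct_pci_err_handler wired up above participates in PCI error recovery (AER): on a bus error the PCI core calls .error_detected first with the channel state; if PCI_ERS_RESULT_NEED_RESET was returned it resets the link and calls .slot_reset; and it calls .resume last, once traffic may flow again. A condensed sketch of that contract (the comments describe generic PCI-core behavior, not efct specifics; demo_err_handler is an illustrative name):

static const struct pci_error_handlers demo_err_handler = {
	/* step 1: called with the channel state; the return value
	 * (CAN_RECOVER / NEED_RESET / DISCONNECT) steers recovery */
	.error_detected	= efct_pci_io_error_detected,
	/* step 2: called after a slot reset when NEED_RESET was
	 * requested; re-enable the device, report RECOVERED or not */
	.slot_reset	= efct_pci_io_slot_reset,
	/* step 3: called last, when normal I/O may resume */
	.resume		= efct_pci_io_resume,
};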
drivers/scsi/elx/efct/efct_driver.h (new file):

@@ -0,0 +1,109 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#if !defined(__EFCT_DRIVER_H__)
#define __EFCT_DRIVER_H__

/***************************************************************************
 * OS specific includes
 */
#include <stdarg.h>
#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/firmware.h>
#include "../include/efc_common.h"
#include "../libefc/efclib.h"
#include "efct_hw.h"
#include "efct_io.h"
#include "efct_xport.h"

#define EFCT_DRIVER_NAME			"efct"
#define EFCT_DRIVER_VERSION			"1.0.0.0"

/* EFCT_DEFAULT_FILTER-
 * MRQ filter to segregate the IO flow.
 */
#define EFCT_DEFAULT_FILTER			"0x01ff22ff,0,0,0"

/* EFCT_OS_MAX_ISR_TIME_MSEC -
 * maximum time driver code should spend in an interrupt
 * or kernel thread context without yielding
 */
#define EFCT_OS_MAX_ISR_TIME_MSEC		1000

#define EFCT_FC_MAX_SGL				64
#define EFCT_FC_DIF_SEED			0

/* Watermark */
#define EFCT_WATERMARK_HIGH_PCT			90
#define EFCT_WATERMARK_LOW_PCT			80
#define EFCT_IO_WATERMARK_PER_INITIATOR		8

#define EFCT_PCI_MAX_REGS			6
#define MAX_PCI_INTERRUPTS			16

struct efct_intr_context {
	struct efct		*efct;
	u32			index;
};

struct efct {
	struct pci_dev			*pci;
	void __iomem			*reg[EFCT_PCI_MAX_REGS];

	u32				n_msix_vec;
	bool				attached;
	bool				soft_wwn_enable;
	u8				efct_req_fw_upgrade;
	struct efct_intr_context	intr_context[MAX_PCI_INTERRUPTS];
	u32				numa_node;

	char				name[EFC_NAME_LENGTH];
	u32				instance_index;
	struct list_head		list_entry;
	struct efct_scsi_tgt		tgt_efct;
	struct efct_xport		*xport;
	struct efc			*efcport;
	struct Scsi_Host		*shost;
	int				logmask;
	u32				max_isr_time_msec;

	const char			*desc;

	const char			*model;

	struct efct_hw			hw;

	u32				rq_selection_policy;
	char				*filter_def;
	int				topology;

	/* Look up for target node */
	struct xarray			lookup;

	/*
	 * Target IO timer value:
	 * Zero: target command timeout disabled.
	 * Non-zero: Timeout value, in seconds, for target commands
	 */
	u32				target_io_timer_sec;

	int				speed;
	struct dentry			*sess_debugfs_dir;
};

#define FW_WRITE_BUFSIZE		(64 * 1024)

struct efct_fw_write_result {
	struct completion done;
	int status;
	u32 actual_xfer;
	u32 change_status;
};

extern struct list_head			efct_devices;

#endif /* __EFCT_DRIVER_H__ */
[File diff suppressed because it is too large]
drivers/scsi/elx/efct/efct_hw.h (new file):

@@ -0,0 +1,764 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#ifndef _EFCT_HW_H
#define _EFCT_HW_H

#include "../libefc_sli/sli4.h"

/*
 * EFCT PCI IDs
 */
#define EFCT_VENDOR_ID			0x10df
/* LightPulse 16Gb x 4 FC (lancer-g6) */
#define EFCT_DEVICE_LANCER_G6		0xe307
/* LightPulse 32Gb x 4 FC (lancer-g7) */
#define EFCT_DEVICE_LANCER_G7		0xf407

/*Default RQ entries len used by driver*/
#define EFCT_HW_RQ_ENTRIES_MIN		512
#define EFCT_HW_RQ_ENTRIES_DEF		1024
#define EFCT_HW_RQ_ENTRIES_MAX		4096

/*Defines the size of the RQ buffers used for each RQ*/
#define EFCT_HW_RQ_SIZE_HDR		128
#define EFCT_HW_RQ_SIZE_PAYLOAD		1024

/*Define the maximum number of multi-receive queues*/
#define EFCT_HW_MAX_MRQS		8

/*
 * Define count of when to set the WQEC bit in a submitted
 * WQE, causing a consumed/released completion to be posted.
 */
#define EFCT_HW_WQEC_SET_COUNT		32

/*Send frame timeout in seconds*/
#define EFCT_HW_SEND_FRAME_TIMEOUT	10

/*
 * FDT Transfer Hint value, reads greater than this value
 * will be segmented to implement fairness. A value of zero disables
 * the feature.
 */
#define EFCT_HW_FDT_XFER_HINT		8192

#define EFCT_HW_TIMECHECK_ITERATIONS	100
#define EFCT_HW_MAX_NUM_MQ		1
#define EFCT_HW_MAX_NUM_RQ		32
#define EFCT_HW_MAX_NUM_EQ		16
#define EFCT_HW_MAX_NUM_WQ		32
#define EFCT_HW_DEF_NUM_EQ		1

#define OCE_HW_MAX_NUM_MRQ_PAIRS	16

#define EFCT_HW_MQ_DEPTH		128
#define EFCT_HW_EQ_DEPTH		1024

/*
 * A CQ will be assigned to each WQ
 * (CQ must have 2X entries of the WQ for abort
 * processing), plus a separate one for each RQ PAIR and one for MQ
 */
#define EFCT_HW_MAX_NUM_CQ \
	((EFCT_HW_MAX_NUM_WQ * 2) + 1 + (OCE_HW_MAX_NUM_MRQ_PAIRS * 2))

#define EFCT_HW_Q_HASH_SIZE		128
#define EFCT_HW_RQ_HEADER_SIZE		128
#define EFCT_HW_RQ_HEADER_INDEX		0

#define EFCT_HW_REQUE_XRI_REGTAG	65534

/* Options for efct_hw_command() */
enum efct_cmd_opts {
	/* command executes synchronously and busy-waits for completion */
	EFCT_CMD_POLL,
	/* command executes asynchronously. Uses callback */
	EFCT_CMD_NOWAIT,
};

enum efct_hw_reset {
	EFCT_HW_RESET_FUNCTION,
	EFCT_HW_RESET_FIRMWARE,
	EFCT_HW_RESET_MAX
};

enum efct_hw_topo {
	EFCT_HW_TOPOLOGY_AUTO,
	EFCT_HW_TOPOLOGY_NPORT,
	EFCT_HW_TOPOLOGY_LOOP,
	EFCT_HW_TOPOLOGY_NONE,
	EFCT_HW_TOPOLOGY_MAX
};

/* pack fw revision values into a single uint64_t */
#define HW_FWREV(a, b, c, d) (((uint64_t)(a) << 48) | ((uint64_t)(b) << 32) \
			      | ((uint64_t)(c) << 16) | ((uint64_t)(d)))

#define EFCT_FW_VER_STR(a, b, c, d) (#a "." #b "." #c "." #d)

enum efct_hw_io_type {
	EFCT_HW_ELS_REQ,
	EFCT_HW_ELS_RSP,
	EFCT_HW_FC_CT,
	EFCT_HW_FC_CT_RSP,
	EFCT_HW_BLS_ACC,
	EFCT_HW_BLS_RJT,
	EFCT_HW_IO_TARGET_READ,
	EFCT_HW_IO_TARGET_WRITE,
	EFCT_HW_IO_TARGET_RSP,
	EFCT_HW_IO_DNRX_REQUEUE,
	EFCT_HW_IO_MAX,
};

enum efct_hw_io_state {
	EFCT_HW_IO_STATE_FREE,
	EFCT_HW_IO_STATE_INUSE,
	EFCT_HW_IO_STATE_WAIT_FREE,
	EFCT_HW_IO_STATE_WAIT_SEC_HIO,
};

#define EFCT_TARGET_WRITE_SKIPS	1
#define EFCT_TARGET_READ_SKIPS	2

struct efct_hw;
struct efct_io;

#define EFCT_CMD_CTX_POOL_SZ	32
/**
 * HW command context.
 * Stores the state for the asynchronous commands sent to the hardware.
 */
struct efct_command_ctx {
	struct list_head	list_entry;
	int (*cb)(struct efct_hw *hw, int status, u8 *mqe, void *arg);
	void			*arg;	/* Argument for callback */
	/* buffer holding command / results */
	u8			buf[SLI4_BMBX_SIZE];
	void			*ctx;	/* upper layer context */
};

struct efct_hw_sgl {
	uintptr_t	addr;
	size_t		len;
};

union efct_hw_io_param_u {
	struct sli_bls_params bls;
	struct sli_els_params els;
	struct sli_ct_params fc_ct;
	struct sli_fcp_tgt_params fcp_tgt;
};

/* WQ steering mode */
enum efct_hw_wq_steering {
	EFCT_HW_WQ_STEERING_CLASS,
	EFCT_HW_WQ_STEERING_REQUEST,
	EFCT_HW_WQ_STEERING_CPU,
};

/* HW wqe object */
struct efct_hw_wqe {
	struct list_head	list_entry;
	bool			abort_wqe_submit_needed;
	bool			send_abts;
	u32			id;
	u32			abort_reqtag;
	u8			*wqebuf;
};

struct efct_hw_io;
/* Typedef for HW "done" callback */
typedef int (*efct_hw_done_t)(struct efct_hw_io *, u32 len, int status,
			      u32 ext, void *ul_arg);

/**
 * HW IO object.
 *
 * Stores the per-IO information necessary
 * for both SLI and efct.
 * @ref: reference counter for hw io object
 * @state: state of IO: free, busy, wait_free
 * @list_entry used for busy, wait_free, free lists
 * @wqe Work queue object, with link for pending
 * @hw pointer back to hardware context
 * @xfer_rdy transfer ready data
 * @type IO type
 * @xbusy Exchange is active in FW
 * @abort_in_progress if TRUE, abort is in progress
 * @status_saved if TRUE, latched status should be returned
 * @wq_class WQ class if steering mode is Class
 * @reqtag request tag for this HW IO
 * @wq WQ assigned to the exchange
 * @done Function called on IO completion
 * @arg argument passed to IO done callback
 * @abort_done Function called on abort completion
 * @abort_arg argument passed to abort done callback
 * @wq_steering WQ steering mode request
 * @saved_status Saved status
 * @saved_len Status length
 * @saved_ext Saved extended status
 * @eq EQ on which this HIO came up
 * @sge_offset SGE data offset
 * @def_sgl_count Count of SGEs in default SGL
 * @abort_reqtag request tag for an abort of this HW IO
 * @indicator Exchange indicator
 * @def_sgl default SGL
 * @sgl pointer to current active SGL
 * @sgl_count count of SGEs in io->sgl
 * @first_data_sge index of first data SGE
 * @n_sge number of active SGEs
 */
struct efct_hw_io {
	struct kref		ref;
	enum efct_hw_io_state	state;
	void			(*release)(struct kref *arg);
	struct list_head	list_entry;
	struct efct_hw_wqe	wqe;

	struct efct_hw		*hw;
	struct efc_dma		xfer_rdy;
	u16			type;
	bool			xbusy;
	int			abort_in_progress;
	bool			status_saved;
	u8			wq_class;
	u16			reqtag;

	struct hw_wq		*wq;
	efct_hw_done_t		done;
	void			*arg;
	efct_hw_done_t		abort_done;
	void			*abort_arg;

	enum efct_hw_wq_steering wq_steering;

	u32			saved_status;
	u32			saved_len;
	u32			saved_ext;

	struct hw_eq		*eq;
	u32			sge_offset;
	u32			def_sgl_count;
	u32			abort_reqtag;
	u32			indicator;
	struct efc_dma		def_sgl;
	struct efc_dma		*sgl;
	u32			sgl_count;
	u32			first_data_sge;
	u32			n_sge;
};
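The HW IO object above is reference counted: ref is held while the exchange is in use and release runs from kref_put() when the last reference drops, which is how freeing can be deferred until the firmware has let go of the exchange. A generic sketch of that lifecycle (demo_io is a hypothetical type, not the efct one):

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_io {
	struct kref ref;
	/* ... per-IO payload ... */
};

static void demo_io_release(struct kref *kref)
{
	struct demo_io *io = container_of(kref, struct demo_io, ref);

	kfree(io);			/* last reference gone */
}

static struct demo_io *demo_io_alloc(void)
{
	struct demo_io *io = kzalloc(sizeof(*io), GFP_KERNEL);

	if (io)
		kref_init(&io->ref);	/* refcount starts at 1 */
	return io;
}

static void demo_io_put(struct demo_io *io)
{
	kref_put(&io->ref, demo_io_release);	/* runs release at zero */
}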
enum efct_hw_port {
|
||||
EFCT_HW_PORT_INIT,
|
||||
EFCT_HW_PORT_SHUTDOWN,
|
||||
};
|
||||
|
||||
/* Node group rpi reference */
|
||||
struct efct_hw_rpi_ref {
|
||||
atomic_t rpi_count;
|
||||
atomic_t rpi_attached;
|
||||
};
|
||||
|
||||
enum efct_hw_link_stat {
|
||||
EFCT_HW_LINK_STAT_LINK_FAILURE_COUNT,
|
||||
EFCT_HW_LINK_STAT_LOSS_OF_SYNC_COUNT,
|
||||
EFCT_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT,
|
||||
EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT,
|
||||
EFCT_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT,
|
||||
EFCT_HW_LINK_STAT_CRC_COUNT,
|
||||
EFCT_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT,
|
||||
EFCT_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT,
|
||||
EFCT_HW_LINK_STAT_ARB_TIMEOUT_COUNT,
|
||||
EFCT_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT,
|
||||
EFCT_HW_LINK_STAT_CURR_RCV_B2B_CREDIT,
|
||||
EFCT_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT,
|
||||
EFCT_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT,
|
||||
EFCT_HW_LINK_STAT_RCV_EOFA_COUNT,
|
||||
EFCT_HW_LINK_STAT_RCV_EOFDTI_COUNT,
|
||||
EFCT_HW_LINK_STAT_RCV_EOFNI_COUNT,
|
||||
EFCT_HW_LINK_STAT_RCV_SOFF_COUNT,
|
||||
EFCT_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT,
|
||||
EFCT_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT,
|
||||
EFCT_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT,
|
||||
EFCT_HW_LINK_STAT_MAX,
|
||||
};
|
||||
|
||||
enum efct_hw_host_stat {
|
||||
EFCT_HW_HOST_STAT_TX_KBYTE_COUNT,
|
||||
EFCT_HW_HOST_STAT_RX_KBYTE_COUNT,
|
||||
EFCT_HW_HOST_STAT_TX_FRAME_COUNT,
|
||||
EFCT_HW_HOST_STAT_RX_FRAME_COUNT,
|
||||
EFCT_HW_HOST_STAT_TX_SEQ_COUNT,
|
||||
EFCT_HW_HOST_STAT_RX_SEQ_COUNT,
|
||||
EFCT_HW_HOST_STAT_TOTAL_EXCH_ORIG,
|
||||
EFCT_HW_HOST_STAT_TOTAL_EXCH_RESP,
|
||||
EFCT_HW_HOSY_STAT_RX_P_BSY_COUNT,
|
||||
EFCT_HW_HOST_STAT_RX_F_BSY_COUNT,
|
||||
EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT,
|
||||
EFCT_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT,
|
||||
EFCT_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT,
|
||||
EFCT_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT,
|
||||
EFCT_HW_HOST_STAT_MAX,
|
||||
};
|
||||
|
||||
enum efct_hw_state {
|
||||
EFCT_HW_STATE_UNINITIALIZED,
|
||||
EFCT_HW_STATE_QUEUES_ALLOCATED,
|
||||
EFCT_HW_STATE_ACTIVE,
|
||||
EFCT_HW_STATE_RESET_IN_PROGRESS,
|
||||
EFCT_HW_STATE_TEARDOWN_IN_PROGRESS,
|
||||
};
|
||||
|
||||
struct efct_hw_link_stat_counts {
|
||||
u8 overflow;
|
||||
u32 counter;
|
||||
};
|
||||
|
||||
struct efct_hw_host_stat_counts {
|
||||
u32 counter;
|
||||
};
|
||||
|
||||
/* Structure used for the hash lookup of queue IDs */
|
||||
struct efct_queue_hash {
|
||||
bool in_use;
|
||||
u16 id;
|
||||
u16 index;
|
||||
};
|
||||
|
||||
/* WQ callback object */
|
||||
struct hw_wq_callback {
|
||||
u16 instance_index; /* use for request tag */
|
||||
void (*callback)(void *arg, u8 *cqe, int status);
|
||||
void *arg;
|
||||
struct list_head list_entry;
|
||||
};
|
||||
|
||||
struct reqtag_pool {
|
||||
spinlock_t lock; /* pool lock */
|
||||
struct hw_wq_callback *tags[U16_MAX];
|
||||
struct list_head freelist;
|
||||
};
|
||||
|
||||
struct efct_hw_config {
|
||||
u32 n_eq;
|
||||
u32 n_cq;
|
||||
u32 n_mq;
|
||||
u32 n_rq;
|
||||
u32 n_wq;
|
||||
u32 n_io;
|
||||
u32 n_sgl;
|
||||
u32 speed;
|
||||
u32 topology;
|
||||
/* size of the buffers for first burst */
|
||||
u32 rq_default_buffer_size;
|
||||
u8 esoc;
|
||||
/* MRQ RQ selection policy */
|
||||
u8 rq_selection_policy;
|
||||
/* RQ quanta if rq_selection_policy == 2 */
|
||||
u8 rr_quanta;
|
||||
u32 filter_def[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
|
||||
};
|
||||
|
||||
struct efct_hw {
|
||||
struct efct *os;
|
||||
struct sli4 sli;
|
||||
u16 ulp_start;
|
||||
u16 ulp_max;
|
||||
u32 dump_size;
|
||||
enum efct_hw_state state;
|
||||
bool hw_setup_called;
|
||||
u8 sliport_healthcheck;
|
||||
u16 fcf_indicator;
|
||||
|
||||
/* HW configuration */
|
||||
struct efct_hw_config config;
|
||||
|
||||
/* calculated queue sizes for each type */
|
||||
u32 num_qentries[SLI4_QTYPE_MAX];
|
||||
|
||||
/* Storage for SLI queue objects */
|
||||
struct sli4_queue wq[EFCT_HW_MAX_NUM_WQ];
|
||||
struct sli4_queue rq[EFCT_HW_MAX_NUM_RQ];
|
||||
u16 hw_rq_lookup[EFCT_HW_MAX_NUM_RQ];
|
||||
struct sli4_queue mq[EFCT_HW_MAX_NUM_MQ];
|
||||
struct sli4_queue cq[EFCT_HW_MAX_NUM_CQ];
|
||||
struct sli4_queue eq[EFCT_HW_MAX_NUM_EQ];
|
||||
|
||||
/* HW queue */
|
||||
u32 eq_count;
|
||||
u32 cq_count;
|
||||
u32 mq_count;
|
||||
u32 wq_count;
|
||||
u32 rq_count;
|
||||
u32 cmd_head_count;
|
||||
struct list_head eq_list;
|
||||
|
||||
struct efct_queue_hash cq_hash[EFCT_HW_Q_HASH_SIZE];
|
||||
struct efct_queue_hash rq_hash[EFCT_HW_Q_HASH_SIZE];
|
||||
struct efct_queue_hash wq_hash[EFCT_HW_Q_HASH_SIZE];
|
||||
|
||||
/* Storage for HW queue objects */
|
||||
struct hw_wq *hw_wq[EFCT_HW_MAX_NUM_WQ];
|
||||
struct hw_rq *hw_rq[EFCT_HW_MAX_NUM_RQ];
|
||||
struct hw_mq *hw_mq[EFCT_HW_MAX_NUM_MQ];
|
||||
struct hw_cq *hw_cq[EFCT_HW_MAX_NUM_CQ];
|
||||
struct hw_eq *hw_eq[EFCT_HW_MAX_NUM_EQ];
|
||||
/* count of hw_rq[] entries */
|
||||
u32 hw_rq_count;
|
||||
/* count of multirq RQs */
|
||||
u32 hw_mrq_count;
|
||||
|
||||
struct hw_wq **wq_cpu_array;
|
||||
|
||||
/* Sequence objects used in incoming frame processing */
|
||||
struct efc_hw_sequence *seq_pool;
|
||||
|
||||
/* Maintain an ordered, linked list of outstanding HW commands. */
|
||||
struct mutex bmbx_lock;
|
||||
spinlock_t cmd_lock;
|
||||
struct list_head cmd_head;
|
||||
struct list_head cmd_pending;
|
||||
mempool_t *cmd_ctx_pool;
|
||||
mempool_t *mbox_rqst_pool;
|
||||
|
||||
struct sli4_link_event link;
|
||||
|
||||
/* pointer array of IO objects */
|
||||
struct efct_hw_io **io;
|
||||
/* array of WQE buffs mapped to IO objects */
|
||||
u8 *wqe_buffs;
|
||||
|
||||
/* IO lock to synchronize list access */
|
||||
spinlock_t io_lock;
|
||||
/* List of IO objects in use */
|
||||
struct list_head io_inuse;
|
||||
/* List of IO objects waiting to be freed */
|
||||
struct list_head io_wait_free;
|
||||
/* List of IO objects available for allocation */
|
||||
struct list_head io_free;
|
||||
|
||||
struct efc_dma loop_map;
|
||||
|
||||
struct efc_dma xfer_rdy;
|
||||
|
||||
struct efc_dma rnode_mem;
|
||||
|
||||
atomic_t io_alloc_failed_count;
|
||||
|
||||
/* stat: wq sumbit count */
|
||||
u32 tcmd_wq_submit[EFCT_HW_MAX_NUM_WQ];
|
||||
/* stat: wq complete count */
|
||||
u32 tcmd_wq_complete[EFCT_HW_MAX_NUM_WQ];
|
||||
|
||||
atomic_t send_frame_seq_id;
|
||||
struct reqtag_pool *wq_reqtag_pool;
|
||||
};
|
||||
|
||||
enum efct_hw_io_count_type {
|
||||
EFCT_HW_IO_INUSE_COUNT,
|
||||
EFCT_HW_IO_FREE_COUNT,
|
||||
EFCT_HW_IO_WAIT_FREE_COUNT,
|
||||
EFCT_HW_IO_N_TOTAL_IO_COUNT,
|
||||
};
|
||||
|
||||
/* HW queue data structures */
|
||||
struct hw_eq {
|
||||
struct list_head list_entry;
|
||||
enum sli4_qtype type;
|
||||
u32 instance;
|
||||
u32 entry_count;
|
||||
u32 entry_size;
|
||||
struct efct_hw *hw;
|
||||
struct sli4_queue *queue;
|
||||
struct list_head cq_list;
|
||||
u32 use_count;
|
||||
};
|
||||
|
||||
struct hw_cq {
|
||||
struct list_head list_entry;
|
||||
enum sli4_qtype type;
|
||||
u32 instance;
|
||||
u32 entry_count;
|
||||
u32 entry_size;
|
||||
struct hw_eq *eq;
|
||||
struct sli4_queue *queue;
|
||||
struct list_head q_list;
|
||||
u32 use_count;
|
||||
};
|
||||
|
||||
struct hw_q {
|
||||
struct list_head list_entry;
|
||||
enum sli4_qtype type;
|
||||
};
|
||||
|
||||
struct hw_mq {
|
||||
struct list_head list_entry;
|
||||
enum sli4_qtype type;
|
||||
u32 instance;
|
||||
|
||||
u32 entry_count;
|
||||
u32 entry_size;
|
||||
struct hw_cq *cq;
|
||||
struct sli4_queue *queue;
|
||||
|
||||
u32 use_count;
|
||||
};
|
||||
|
||||
struct hw_wq {
|
||||
struct list_head list_entry;
|
||||
enum sli4_qtype type;
|
||||
u32 instance;
|
||||
struct efct_hw *hw;
|
||||
|
||||
u32 entry_count;
|
||||
u32 entry_size;
|
||||
struct hw_cq *cq;
|
||||
struct sli4_queue *queue;
|
||||
u32 class;
|
||||
|
||||
/* WQ consumed */
|
||||
u32 wqec_set_count;
|
||||
u32 wqec_count;
|
||||
u32 free_count;
|
||||
u32 total_submit_count;
|
||||
struct list_head pending_list;
|
||||
|
||||
/* HW IO allocated for use with Send Frame */
|
||||
struct efct_hw_io *send_frame_io;
|
||||
|
||||
/* Stats */
|
||||
u32 use_count;
|
||||
u32 wq_pending_count;
|
||||
};
|
||||
|
||||
struct hw_rq {
|
||||
struct list_head list_entry;
|
||||
enum sli4_qtype type;
|
||||
u32 instance;
|
||||
|
||||
u32 entry_count;
|
||||
u32 use_count;
|
||||
u32 hdr_entry_size;
|
||||
u32 first_burst_entry_size;
|
||||
u32 data_entry_size;
|
||||
bool is_mrq;
|
||||
u32 base_mrq_id;
|
||||
|
||||
struct hw_cq *cq;
|
||||
|
||||
u8 filter_mask;
|
||||
struct sli4_queue *hdr;
|
||||
struct sli4_queue *first_burst;
|
||||
struct sli4_queue *data;
|
||||
|
||||
struct efc_hw_rq_buffer *hdr_buf;
|
||||
struct efc_hw_rq_buffer *fb_buf;
|
||||
struct efc_hw_rq_buffer *payload_buf;
|
||||
/* RQ tracker for this RQ */
|
||||
struct efc_hw_sequence **rq_tracker;
|
||||
};
|
||||
|
||||
struct efct_hw_send_frame_context {
|
||||
struct efct_hw *hw;
|
||||
struct hw_wq_callback *wqcb;
|
||||
struct efct_hw_wqe wqe;
|
||||
void (*callback)(int status, void *arg);
|
||||
void *arg;
|
||||
|
||||
/* General purpose elements */
|
||||
struct efc_hw_sequence *seq;
|
||||
struct efc_dma payload;
|
||||
};
|
||||
|
||||
struct efct_hw_grp_hdr {
|
||||
u32 size;
|
||||
__be32 magic_number;
|
||||
u32 word2;
|
||||
u8 rev_name[128];
|
||||
u8 date[12];
|
||||
u8 revision[32];
|
||||
};
|
||||
|
||||
static inline int
|
||||
efct_hw_get_link_speed(struct efct_hw *hw) {
|
||||
return hw->link.speed;
|
||||
}
|
||||
|
||||
int
|
||||
efct_hw_setup(struct efct_hw *hw, void *os, struct pci_dev *pdev);
|
||||
int efct_hw_init(struct efct_hw *hw);
|
||||
int
|
||||
efct_hw_parse_filter(struct efct_hw *hw, void *value);
|
||||
int
|
||||
efct_hw_init_queues(struct efct_hw *hw);
|
||||
int
|
||||
efct_hw_map_wq_cpu(struct efct_hw *hw);
|
||||
uint64_t
|
||||
efct_get_wwnn(struct efct_hw *hw);
|
||||
uint64_t
|
||||
efct_get_wwpn(struct efct_hw *hw);
|
||||
|
||||
int efct_hw_rx_allocate(struct efct_hw *hw);
|
||||
int efct_hw_rx_post(struct efct_hw *hw);
|
||||
void efct_hw_rx_free(struct efct_hw *hw);
|
||||
int
|
||||
efct_hw_command(struct efct_hw *hw, u8 *cmd, u32 opts, void *cb,
|
||||
void *arg);
|
||||
int
|
||||
efct_issue_mbox_rqst(void *base, void *cmd, void *cb, void *arg);
|
||||
|
||||
struct efct_hw_io *efct_hw_io_alloc(struct efct_hw *hw);
|
||||
int efct_hw_io_free(struct efct_hw *hw, struct efct_hw_io *io);
|
||||
u8 efct_hw_io_inuse(struct efct_hw *hw, struct efct_hw_io *io);
|
||||
int
|
||||
efct_hw_io_send(struct efct_hw *hw, enum efct_hw_io_type type,
|
||||
struct efct_hw_io *io, union efct_hw_io_param_u *iparam,
|
||||
void *cb, void *arg);
|
||||
int
|
||||
efct_hw_io_register_sgl(struct efct_hw *hw, struct efct_hw_io *io,
|
||||
struct efc_dma *sgl,
|
||||
u32 sgl_count);
|
||||
int
|
||||
efct_hw_io_init_sges(struct efct_hw *hw,
|
||||
struct efct_hw_io *io, enum efct_hw_io_type type);
|
||||
|
||||
int
|
||||
efct_hw_io_add_sge(struct efct_hw *hw, struct efct_hw_io *io,
|
||||
uintptr_t addr, u32 length);
|
||||
int
|
||||
efct_hw_io_abort(struct efct_hw *hw, struct efct_hw_io *io_to_abort,
|
||||
bool send_abts, void *cb, void *arg);
|
||||
u32
|
||||
efct_hw_io_get_count(struct efct_hw *hw,
|
||||
enum efct_hw_io_count_type io_count_type);
|
||||
struct efct_hw_io
|
||||
*efct_hw_io_lookup(struct efct_hw *hw, u32 indicator);
|
||||
void efct_hw_io_abort_all(struct efct_hw *hw);
|
||||
void efct_hw_io_free_internal(struct kref *arg);
|
||||
|
||||
/* HW WQ request tag API */
|
||||
struct reqtag_pool *efct_hw_reqtag_pool_alloc(struct efct_hw *hw);
|
||||
void efct_hw_reqtag_pool_free(struct efct_hw *hw);
|
||||
struct hw_wq_callback
|
||||
*efct_hw_reqtag_alloc(struct efct_hw *hw,
|
||||
void (*callback)(void *arg, u8 *cqe,
|
||||
int status), void *arg);
|
||||
void
|
||||
efct_hw_reqtag_free(struct efct_hw *hw, struct hw_wq_callback *wqcb);
|
||||
struct hw_wq_callback
|
||||
*efct_hw_reqtag_get_instance(struct efct_hw *hw, u32 instance_index);
|
||||
|
||||
/* RQ completion handlers for RQ pair mode */
|
||||
int
|
||||
efct_hw_rqpair_process_rq(struct efct_hw *hw,
|
||||
struct hw_cq *cq, u8 *cqe);
|
||||
int
|
||||
efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq);
|
||||
static inline void
|
||||
efct_hw_sequence_copy(struct efc_hw_sequence *dst,
|
||||
struct efc_hw_sequence *src)
|
||||
{
|
||||
/* Copy src to dst, then zero out the linked list link */
|
||||
*dst = *src;
|
||||
}
|
||||

int
efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq);

static inline int
efct_hw_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
	/* Only RQ pair mode is supported */
	return efct_hw_rqpair_sequence_free(hw, seq);
}

int
efct_hw_eq_process(struct efct_hw *hw, struct hw_eq *eq,
		   u32 max_isr_time_msec);
void efct_hw_cq_process(struct efct_hw *hw, struct hw_cq *cq);
void
efct_hw_wq_process(struct efct_hw *hw, struct hw_cq *cq,
		   u8 *cqe, int status, u16 rid);
void
efct_hw_xabt_process(struct efct_hw *hw, struct hw_cq *cq,
		     u8 *cqe, u16 rid);
int
efct_hw_process(struct efct_hw *hw, u32 vector, u32 max_isr_time_msec);
int
efct_hw_queue_hash_find(struct efct_queue_hash *hash, u16 id);
int efct_hw_wq_write(struct hw_wq *wq, struct efct_hw_wqe *wqe);
int
efct_hw_send_frame(struct efct_hw *hw, struct fc_frame_header *hdr,
		   u8 sof, u8 eof, struct efc_dma *payload,
		   struct efct_hw_send_frame_context *ctx,
		   void (*callback)(void *arg, u8 *cqe, int status),
		   void *arg);
int
efct_els_hw_srrs_send(struct efc *efc, struct efc_disc_io *io);
int
efct_efc_bls_send(struct efc *efc, u32 type, struct sli_bls_params *bls);
int
efct_hw_bls_send(struct efct *efct, u32 type, struct sli_bls_params *bls_params,
		 void *cb, void *arg);

/* Function for retrieving link statistics */
int
efct_hw_get_link_stats(struct efct_hw *hw,
		       u8 req_ext_counters,
		       u8 clear_overflow_flags,
		       u8 clear_all_counters,
		       void (*efct_hw_link_stat_cb_t)(int status,
				u32 num_counters,
				struct efct_hw_link_stat_counts *counters,
				void *arg),
		       void *arg);
/* Function for retrieving host statistics */
int
efct_hw_get_host_stats(struct efct_hw *hw,
		       u8 cc,
		       void (*efct_hw_host_stat_cb_t)(int status,
				u32 num_counters,
				struct efct_hw_host_stat_counts *counters,
				void *arg),
		       void *arg);
int
efct_hw_firmware_write(struct efct_hw *hw, struct efc_dma *dma,
		       u32 size, u32 offset, int last,
		       void (*cb)(int status, u32 bytes_written,
				  u32 change_status, void *arg),
		       void *arg);
typedef void (*efct_hw_async_cb_t)(struct efct_hw *hw, int status,
				   u8 *mqe, void *arg);
int
efct_hw_async_call(struct efct_hw *hw, efct_hw_async_cb_t callback, void *arg);

struct hw_eq *efct_hw_new_eq(struct efct_hw *hw, u32 entry_count);
struct hw_cq *efct_hw_new_cq(struct hw_eq *eq, u32 entry_count);
u32
efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
		   u32 num_cqs, u32 entry_count);
struct hw_mq *efct_hw_new_mq(struct hw_cq *cq, u32 entry_count);
struct hw_wq
*efct_hw_new_wq(struct hw_cq *cq, u32 entry_count);
u32
efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
		   u32 num_rq_pairs, u32 entry_count);
void efct_hw_del_eq(struct hw_eq *eq);
void efct_hw_del_cq(struct hw_cq *cq);
void efct_hw_del_mq(struct hw_mq *mq);
void efct_hw_del_wq(struct hw_wq *wq);
void efct_hw_del_rq(struct hw_rq *rq);
void efct_hw_queue_teardown(struct efct_hw *hw);
void efct_hw_teardown(struct efct_hw *hw);
int
efct_hw_reset(struct efct_hw *hw, enum efct_hw_reset reset);

int
efct_hw_port_control(struct efct_hw *hw, enum efct_hw_port ctrl,
		     uintptr_t value,
		     void (*cb)(int status, uintptr_t value, void *arg),
		     void *arg);

#endif /* __EFCT_H__ */

@ -0,0 +1,677 @@

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_unsol.h"

int
efct_hw_init_queues(struct efct_hw *hw)
{
	struct hw_eq *eq = NULL;
	struct hw_cq *cq = NULL;
	struct hw_wq *wq = NULL;
	struct hw_mq *mq = NULL;

	struct hw_eq *eqs[EFCT_HW_MAX_NUM_EQ];
	struct hw_cq *cqs[EFCT_HW_MAX_NUM_EQ];
	struct hw_rq *rqs[EFCT_HW_MAX_NUM_EQ];
	u32 i = 0, j;

	hw->eq_count = 0;
	hw->cq_count = 0;
	hw->mq_count = 0;
	hw->wq_count = 0;
	hw->rq_count = 0;
	hw->hw_rq_count = 0;
	INIT_LIST_HEAD(&hw->eq_list);

	for (i = 0; i < hw->config.n_eq; i++) {
		/* Create EQ */
		eq = efct_hw_new_eq(hw, EFCT_HW_EQ_DEPTH);
		if (!eq) {
			efct_hw_queue_teardown(hw);
			return -ENOMEM;
		}

		eqs[i] = eq;

		/* Create one MQ */
		if (!i) {
			cq = efct_hw_new_cq(eq,
					    hw->num_qentries[SLI4_QTYPE_CQ]);
			if (!cq) {
				efct_hw_queue_teardown(hw);
				return -ENOMEM;
			}

			mq = efct_hw_new_mq(cq, EFCT_HW_MQ_DEPTH);
			if (!mq) {
				efct_hw_queue_teardown(hw);
				return -ENOMEM;
			}
		}

		/* Create WQ */
		cq = efct_hw_new_cq(eq, hw->num_qentries[SLI4_QTYPE_CQ]);
		if (!cq) {
			efct_hw_queue_teardown(hw);
			return -ENOMEM;
		}

		wq = efct_hw_new_wq(cq, hw->num_qentries[SLI4_QTYPE_WQ]);
		if (!wq) {
			efct_hw_queue_teardown(hw);
			return -ENOMEM;
		}
	}

	/* Create CQ set */
	if (efct_hw_new_cq_set(eqs, cqs, i, hw->num_qentries[SLI4_QTYPE_CQ])) {
		efct_hw_queue_teardown(hw);
		return -EIO;
	}

	/* Create RQ set */
	if (efct_hw_new_rq_set(cqs, rqs, i, EFCT_HW_RQ_ENTRIES_DEF)) {
		efct_hw_queue_teardown(hw);
		return -EIO;
	}

	for (j = 0; j < i; j++) {
		rqs[j]->filter_mask = 0;
		rqs[j]->is_mrq = true;
		rqs[j]->base_mrq_id = rqs[0]->hdr->id;
	}

	hw->hw_mrq_count = i;

	return 0;
}
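
/*
 * Sketch of the topology built above (an illustrative summary of the
 * calls in efct_hw_init_queues(), not additional configuration):
 *
 *	EQ[0] -- CQ -- MQ		mailbox queue, first EQ only
 *	EQ[i] -- CQ -- WQ		one work queue per EQ
 *	EQ[i] -- CQ -- RQ pair		header + data rings, created as a
 *					set and then flagged as MRQ above
 */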

int
efct_hw_map_wq_cpu(struct efct_hw *hw)
{
	struct efct *efct = hw->os;
	u32 cpu = 0, i;

	/* Init cpu_map array */
	hw->wq_cpu_array = kcalloc(num_possible_cpus(), sizeof(void *),
				   GFP_KERNEL);
	if (!hw->wq_cpu_array)
		return -ENOMEM;

	for (i = 0; i < hw->config.n_eq; i++) {
		const struct cpumask *maskp;

		/* Get a CPU mask for all CPUs affinitized to this vector */
		maskp = pci_irq_get_affinity(efct->pci, i);
		if (!maskp) {
			efc_log_debug(efct, "maskp null for vector:%d\n", i);
			continue;
		}

		/* Loop through all CPUs associated with vector idx */
		for_each_cpu_and(cpu, maskp, cpu_present_mask) {
			efc_log_debug(efct, "CPU:%d irq vector:%d\n", cpu, i);
			hw->wq_cpu_array[cpu] = hw->hw_wq[i];
		}
	}

	return 0;
}
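
/*
 * A minimal sketch of how the table built above can be consumed on a hot
 * path (hypothetical helper, not defined in this file):
 *
 *	static struct hw_wq *efct_hw_wq_for_cpu(struct efct_hw *hw)
 *	{
 *		struct hw_wq *wq = hw->wq_cpu_array[raw_smp_processor_id()];
 *
 *		return wq ? wq : hw->hw_wq[0];
 *	}
 *
 * The NULL fallback matters because vectors whose affinity mask could not
 * be read above leave their CPUs unmapped.
 */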

struct hw_eq *
efct_hw_new_eq(struct efct_hw *hw, u32 entry_count)
{
	struct hw_eq *eq = kzalloc(sizeof(*eq), GFP_KERNEL);

	if (!eq)
		return NULL;

	eq->type = SLI4_QTYPE_EQ;
	eq->hw = hw;
	eq->entry_count = entry_count;
	eq->instance = hw->eq_count++;
	eq->queue = &hw->eq[eq->instance];
	INIT_LIST_HEAD(&eq->cq_list);

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_EQ, eq->queue, entry_count,
			    NULL)) {
		efc_log_err(hw->os, "EQ[%d] alloc failure\n", eq->instance);
		kfree(eq);
		return NULL;
	}

	sli_eq_modify_delay(&hw->sli, eq->queue, 1, 0, 8);
	hw->hw_eq[eq->instance] = eq;
	INIT_LIST_HEAD(&eq->list_entry);
	list_add_tail(&eq->list_entry, &hw->eq_list);
	efc_log_debug(hw->os, "create eq[%2d] id %3d len %4d\n", eq->instance,
		      eq->queue->id, eq->entry_count);
	return eq;
}
struct hw_cq *
efct_hw_new_cq(struct hw_eq *eq, u32 entry_count)
{
	struct efct_hw *hw = eq->hw;
	struct hw_cq *cq = kzalloc(sizeof(*cq), GFP_KERNEL);

	if (!cq)
		return NULL;

	cq->eq = eq;
	cq->type = SLI4_QTYPE_CQ;
	cq->instance = eq->hw->cq_count++;
	cq->entry_count = entry_count;
	cq->queue = &hw->cq[cq->instance];

	INIT_LIST_HEAD(&cq->q_list);

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_CQ, cq->queue,
			    cq->entry_count, eq->queue)) {
		efc_log_err(hw->os, "CQ[%d] allocation failure len=%d\n",
			    cq->instance, cq->entry_count);
		kfree(cq);
		return NULL;
	}

	hw->hw_cq[cq->instance] = cq;
	INIT_LIST_HEAD(&cq->list_entry);
	list_add_tail(&cq->list_entry, &eq->cq_list);
	efc_log_debug(hw->os, "create cq[%2d] id %3d len %4d\n", cq->instance,
		      cq->queue->id, cq->entry_count);
	return cq;
}

u32
efct_hw_new_cq_set(struct hw_eq *eqs[], struct hw_cq *cqs[],
		   u32 num_cqs, u32 entry_count)
{
	u32 i;
	struct efct_hw *hw = eqs[0]->hw;
	struct sli4 *sli4 = &hw->sli;
	struct hw_cq *cq = NULL;
	struct sli4_queue *qs[SLI4_MAX_CQ_SET_COUNT];
	struct sli4_queue *assefct[SLI4_MAX_CQ_SET_COUNT];

	/* Initialise CQS pointers to NULL */
	for (i = 0; i < num_cqs; i++)
		cqs[i] = NULL;

	for (i = 0; i < num_cqs; i++) {
		cq = kzalloc(sizeof(*cq), GFP_KERNEL);
		if (!cq)
			goto error;

		cqs[i] = cq;
		cq->eq = eqs[i];
		cq->type = SLI4_QTYPE_CQ;
		cq->instance = hw->cq_count++;
		cq->entry_count = entry_count;
		cq->queue = &hw->cq[cq->instance];
		qs[i] = cq->queue;
		assefct[i] = eqs[i]->queue;
		INIT_LIST_HEAD(&cq->q_list);
	}

	if (sli_cq_alloc_set(sli4, qs, num_cqs, entry_count, assefct)) {
		efc_log_err(hw->os, "Failed to create CQ Set.\n");
		goto error;
	}

	for (i = 0; i < num_cqs; i++) {
		hw->hw_cq[cqs[i]->instance] = cqs[i];
		INIT_LIST_HEAD(&cqs[i]->list_entry);
		list_add_tail(&cqs[i]->list_entry, &cqs[i]->eq->cq_list);
	}

	return 0;

error:
	for (i = 0; i < num_cqs; i++) {
		kfree(cqs[i]);
		cqs[i] = NULL;
	}
	return -EIO;
}

struct hw_mq *
efct_hw_new_mq(struct hw_cq *cq, u32 entry_count)
{
	struct efct_hw *hw = cq->eq->hw;
	struct hw_mq *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

	if (!mq)
		return NULL;

	mq->cq = cq;
	mq->type = SLI4_QTYPE_MQ;
	mq->instance = cq->eq->hw->mq_count++;
	mq->entry_count = entry_count;
	mq->entry_size = EFCT_HW_MQ_DEPTH;
	mq->queue = &hw->mq[mq->instance];

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_MQ, mq->queue, mq->entry_size,
			    cq->queue)) {
		efc_log_err(hw->os, "MQ allocation failure\n");
		kfree(mq);
		return NULL;
	}

	hw->hw_mq[mq->instance] = mq;
	INIT_LIST_HEAD(&mq->list_entry);
	list_add_tail(&mq->list_entry, &cq->q_list);
	efc_log_debug(hw->os, "create mq[%2d] id %3d len %4d\n", mq->instance,
		      mq->queue->id, mq->entry_count);
	return mq;
}

struct hw_wq *
efct_hw_new_wq(struct hw_cq *cq, u32 entry_count)
{
	struct efct_hw *hw = cq->eq->hw;
	struct hw_wq *wq = kzalloc(sizeof(*wq), GFP_KERNEL);

	if (!wq)
		return NULL;

	wq->hw = cq->eq->hw;
	wq->cq = cq;
	wq->type = SLI4_QTYPE_WQ;
	wq->instance = cq->eq->hw->wq_count++;
	wq->entry_count = entry_count;
	wq->queue = &hw->wq[wq->instance];
	wq->wqec_set_count = EFCT_HW_WQEC_SET_COUNT;
	wq->wqec_count = wq->wqec_set_count;
	wq->free_count = wq->entry_count - 1;
	INIT_LIST_HEAD(&wq->pending_list);

	if (sli_queue_alloc(&hw->sli, SLI4_QTYPE_WQ, wq->queue,
			    wq->entry_count, cq->queue)) {
		efc_log_err(hw->os, "WQ allocation failure\n");
		kfree(wq);
		return NULL;
	}

	hw->hw_wq[wq->instance] = wq;
	INIT_LIST_HEAD(&wq->list_entry);
	list_add_tail(&wq->list_entry, &cq->q_list);
	efc_log_debug(hw->os, "create wq[%2d] id %3d len %4d cls %d\n",
		      wq->instance, wq->queue->id, wq->entry_count, wq->class);
	return wq;
}

u32
efct_hw_new_rq_set(struct hw_cq *cqs[], struct hw_rq *rqs[],
		   u32 num_rq_pairs, u32 entry_count)
{
	struct efct_hw *hw = cqs[0]->eq->hw;
	struct hw_rq *rq = NULL;
	struct sli4_queue *qs[SLI4_MAX_RQ_SET_COUNT * 2] = { NULL };
	u32 i, q_count, size;

	/* Initialise RQS pointers */
	for (i = 0; i < num_rq_pairs; i++)
		rqs[i] = NULL;

	/*
	 * Allocate an RQ object SET, where each element in the set
	 * encapsulates 2 SLI queues (for rq pair)
	 */
	for (i = 0, q_count = 0; i < num_rq_pairs; i++, q_count += 2) {
		rq = kzalloc(sizeof(*rq), GFP_KERNEL);
		if (!rq)
			goto error;

		rqs[i] = rq;
		rq->instance = hw->hw_rq_count++;
		rq->cq = cqs[i];
		rq->type = SLI4_QTYPE_RQ;
		rq->entry_count = entry_count;

		/* Header RQ */
		rq->hdr = &hw->rq[hw->rq_count];
		rq->hdr_entry_size = EFCT_HW_RQ_HEADER_SIZE;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		hw->rq_count++;
		qs[q_count] = rq->hdr;

		/* Data RQ */
		rq->data = &hw->rq[hw->rq_count];
		rq->data_entry_size = hw->config.rq_default_buffer_size;
		hw->hw_rq_lookup[hw->rq_count] = rq->instance;
		hw->rq_count++;
		qs[q_count + 1] = rq->data;

		rq->rq_tracker = NULL;
	}

	if (sli_fc_rq_set_alloc(&hw->sli, num_rq_pairs, qs,
				cqs[0]->queue->id,
				rqs[0]->entry_count,
				rqs[0]->hdr_entry_size,
				rqs[0]->data_entry_size)) {
		efc_log_err(hw->os, "RQ Set alloc failure for base CQ=%d\n",
			    cqs[0]->queue->id);
		goto error;
	}

	for (i = 0; i < num_rq_pairs; i++) {
		hw->hw_rq[rqs[i]->instance] = rqs[i];
		INIT_LIST_HEAD(&rqs[i]->list_entry);
		list_add_tail(&rqs[i]->list_entry, &cqs[i]->q_list);
		size = sizeof(struct efc_hw_sequence *) * rqs[i]->entry_count;
		rqs[i]->rq_tracker = kzalloc(size, GFP_KERNEL);
		if (!rqs[i]->rq_tracker)
			goto error;
	}

	return 0;

error:
	for (i = 0; i < num_rq_pairs; i++) {
		if (rqs[i]) {
			kfree(rqs[i]->rq_tracker);
			kfree(rqs[i]);
		}
	}

	return -EIO;
}
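
/*
 * Layout of qs[] as handed to sli_fc_rq_set_alloc() above, shown for two
 * pairs (illustrative):
 *
 *	qs[0] = pair 0 header RQ	qs[1] = pair 0 data RQ
 *	qs[2] = pair 1 header RQ	qs[3] = pair 1 data RQ
 *
 * Header and data entries for one received frame land at the same ring
 * index, which is what the rq_tracker bookkeeping further down relies on.
 */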

void
efct_hw_del_eq(struct hw_eq *eq)
{
	struct hw_cq *cq;
	struct hw_cq *cq_next;

	if (!eq)
		return;

	list_for_each_entry_safe(cq, cq_next, &eq->cq_list, list_entry)
		efct_hw_del_cq(cq);
	list_del(&eq->list_entry);
	eq->hw->hw_eq[eq->instance] = NULL;
	kfree(eq);
}

void
efct_hw_del_cq(struct hw_cq *cq)
{
	struct hw_q *q;
	struct hw_q *q_next;

	if (!cq)
		return;

	list_for_each_entry_safe(q, q_next, &cq->q_list, list_entry) {
		switch (q->type) {
		case SLI4_QTYPE_MQ:
			efct_hw_del_mq((struct hw_mq *)q);
			break;
		case SLI4_QTYPE_WQ:
			efct_hw_del_wq((struct hw_wq *)q);
			break;
		case SLI4_QTYPE_RQ:
			efct_hw_del_rq((struct hw_rq *)q);
			break;
		default:
			break;
		}
	}
	list_del(&cq->list_entry);
	cq->eq->hw->hw_cq[cq->instance] = NULL;
	kfree(cq);
}

void
efct_hw_del_mq(struct hw_mq *mq)
{
	if (!mq)
		return;

	list_del(&mq->list_entry);
	mq->cq->eq->hw->hw_mq[mq->instance] = NULL;
	kfree(mq);
}

void
efct_hw_del_wq(struct hw_wq *wq)
{
	if (!wq)
		return;

	list_del(&wq->list_entry);
	wq->cq->eq->hw->hw_wq[wq->instance] = NULL;
	kfree(wq);
}

void
efct_hw_del_rq(struct hw_rq *rq)
{
	struct efct_hw *hw = NULL;

	if (!rq)
		return;
	/* Free RQ tracker */
	kfree(rq->rq_tracker);
	rq->rq_tracker = NULL;
	list_del(&rq->list_entry);
	hw = rq->cq->eq->hw;
	hw->hw_rq[rq->instance] = NULL;
	kfree(rq);
}

void
efct_hw_queue_teardown(struct efct_hw *hw)
{
	struct hw_eq *eq;
	struct hw_eq *eq_next;

	if (!hw->eq_list.next)
		return;

	list_for_each_entry_safe(eq, eq_next, &hw->eq_list, list_entry)
		efct_hw_del_eq(eq);
}
static inline int
efct_hw_rqpair_find(struct efct_hw *hw, u16 rq_id)
{
	return efct_hw_queue_hash_find(hw->rq_hash, rq_id);
}

static struct efc_hw_sequence *
efct_hw_rqpair_get(struct efct_hw *hw, u16 rqindex, u16 bufindex)
{
	struct sli4_queue *rq_hdr = &hw->rq[rqindex];
	struct efc_hw_sequence *seq = NULL;
	struct hw_rq *rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
	unsigned long flags = 0;

	if (bufindex >= rq_hdr->length) {
		efc_log_err(hw->os,
			    "RQidx %d bufidx %d exceed ring len %d for id %d\n",
			    rqindex, bufindex, rq_hdr->length, rq_hdr->id);
		return NULL;
	}

	/* rq_hdr lock also covers rqindex+1 queue */
	spin_lock_irqsave(&rq_hdr->lock, flags);

	seq = rq->rq_tracker[bufindex];
	rq->rq_tracker[bufindex] = NULL;

	if (!seq) {
		efc_log_err(hw->os,
			    "RQbuf NULL, rqidx %d, bufidx %d, cur q idx = %d\n",
			    rqindex, bufindex, rq_hdr->index);
	}

	spin_unlock_irqrestore(&rq_hdr->lock, flags);
	return seq;
}
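
/*
 * Note: the tracker slot is read and cleared under rq_hdr->lock, the same
 * lock efct_hw_rqpair_put() below takes before re-posting a buffer, so a
 * slot cannot be claimed and refilled concurrently.
 */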

int
efct_hw_rqpair_process_rq(struct efct_hw *hw, struct hw_cq *cq,
			  u8 *cqe)
{
	u16 rq_id;
	u32 index;
	int rqindex;
	int rq_status;
	u32 h_len;
	u32 p_len;
	struct efc_hw_sequence *seq;
	struct hw_rq *rq;

	rq_status = sli_fc_rqe_rqid_and_index(&hw->sli, cqe,
					      &rq_id, &index);
	if (rq_status != 0) {
		switch (rq_status) {
		case SLI4_FC_ASYNC_RQ_BUF_LEN_EXCEEDED:
		case SLI4_FC_ASYNC_RQ_DMA_FAILURE:
			/* just get RQ buffer then return to chip */
			rqindex = efct_hw_rqpair_find(hw, rq_id);
			if (rqindex < 0) {
				efc_log_debug(hw->os,
					      "status=%#x: lookup fail id=%#x\n",
					      rq_status, rq_id);
				break;
			}

			/* get RQ buffer */
			seq = efct_hw_rqpair_get(hw, rqindex, index);

			/* return to chip */
			if (efct_hw_rqpair_sequence_free(hw, seq)) {
				efc_log_debug(hw->os,
					      "status=%#x,fail rtrn buf to RQ\n",
					      rq_status);
				break;
			}
			break;
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_NEEDED:
		case SLI4_FC_ASYNC_RQ_INSUFF_BUF_FRM_DISC:
			/*
			 * since RQ buffers were not consumed, they cannot be
			 * returned to the chip
			 */
			efc_log_debug(hw->os, "Warning: RCQE status=%#x,\n",
				      rq_status);
			fallthrough;
		default:
			break;
		}
		return -EIO;
	}

	rqindex = efct_hw_rqpair_find(hw, rq_id);
	if (rqindex < 0) {
		efc_log_debug(hw->os, "Error: rq_id lookup failed for id=%#x\n",
			      rq_id);
		return -EIO;
	}

	rq = hw->hw_rq[hw->hw_rq_lookup[rqindex]];
	rq->use_count++;

	seq = efct_hw_rqpair_get(hw, rqindex, index);
	if (WARN_ON(!seq))
		return -EIO;

	seq->hw = hw;

	sli_fc_rqe_length(&hw->sli, cqe, &h_len, &p_len);
	seq->header->dma.len = h_len;
	seq->payload->dma.len = p_len;
	seq->fcfi = sli_fc_rqe_fcfi(&hw->sli, cqe);
	seq->hw_priv = cq->eq;

	efct_unsolicited_cb(hw->os, seq);

	return 0;
}

static int
efct_hw_rqpair_put(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
	struct sli4_queue *rq_hdr = &hw->rq[seq->header->rqindex];
	struct sli4_queue *rq_payload = &hw->rq[seq->payload->rqindex];
	u32 hw_rq_index = hw->hw_rq_lookup[seq->header->rqindex];
	struct hw_rq *rq = hw->hw_rq[hw_rq_index];
	u32 phys_hdr[2];
	u32 phys_payload[2];
	int qindex_hdr;
	int qindex_payload;
	unsigned long flags = 0;

	/* Update the RQ verification lookup tables */
	phys_hdr[0] = upper_32_bits(seq->header->dma.phys);
	phys_hdr[1] = lower_32_bits(seq->header->dma.phys);
	phys_payload[0] = upper_32_bits(seq->payload->dma.phys);
	phys_payload[1] = lower_32_bits(seq->payload->dma.phys);

	/* rq_hdr lock also covers payload / header->rqindex+1 queue */
	spin_lock_irqsave(&rq_hdr->lock, flags);

	/*
	 * Note: The header must be posted last for buffer pair mode because
	 * posting on the header queue posts the payload queue as well.
	 * We do not ring the payload queue independently in RQ pair mode.
	 */
	qindex_payload = sli_rq_write(&hw->sli, rq_payload,
				      (void *)phys_payload);
	qindex_hdr = sli_rq_write(&hw->sli, rq_hdr, (void *)phys_hdr);
	if (qindex_hdr < 0 ||
	    qindex_payload < 0) {
		efc_log_err(hw->os, "RQ_ID=%#x write failed\n", rq_hdr->id);
		spin_unlock_irqrestore(&rq_hdr->lock, flags);
		return -EIO;
	}

	/* ensure the indexes are the same */
	WARN_ON(qindex_hdr != qindex_payload);

	/* Update the lookup table */
	if (!rq->rq_tracker[qindex_hdr]) {
		rq->rq_tracker[qindex_hdr] = seq;
	} else {
		efc_log_debug(hw->os,
			      "expected rq_tracker[%d][%d] buffer to be NULL\n",
			      hw_rq_index, qindex_hdr);
	}

	spin_unlock_irqrestore(&rq_hdr->lock, flags);
	return 0;
}

int
efct_hw_rqpair_sequence_free(struct efct_hw *hw, struct efc_hw_sequence *seq)
{
	int rc = 0;

	/*
	 * Post the data buffer first: in RQ pair mode, ringing the doorbell
	 * of the header ring posts the data buffer as well.
	 */
	if (efct_hw_rqpair_put(hw, seq)) {
		efc_log_err(hw->os, "error writing buffers\n");
		return -EIO;
	}

	return rc;
}

int
efct_efc_hw_sequence_free(struct efc *efc, struct efc_hw_sequence *seq)
{
	struct efct *efct = efc->base;

	return efct_hw_rqpair_sequence_free(&efct->hw, seq);
}
@ -0,0 +1,191 @@

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_hw.h"
#include "efct_io.h"

struct efct_io_pool {
	struct efct *efct;
	spinlock_t lock;	/* IO pool lock */
	u32 io_num_ios;		/* Total IOs allocated */
	struct efct_io *ios[EFCT_NUM_SCSI_IOS];
	struct list_head freelist;
};

struct efct_io_pool *
efct_io_pool_create(struct efct *efct, u32 num_sgl)
{
	u32 i = 0;
	struct efct_io_pool *io_pool;
	struct efct_io *io;

	/* Allocate the IO pool */
	io_pool = kzalloc(sizeof(*io_pool), GFP_KERNEL);
	if (!io_pool)
		return NULL;

	io_pool->efct = efct;
	INIT_LIST_HEAD(&io_pool->freelist);
	/* initialize IO pool lock */
	spin_lock_init(&io_pool->lock);

	for (i = 0; i < EFCT_NUM_SCSI_IOS; i++) {
		io = kzalloc(sizeof(*io), GFP_KERNEL);
		if (!io)
			break;

		io_pool->io_num_ios++;
		io_pool->ios[i] = io;
		io->tag = i;
		io->instance_index = i;

		/* Allocate a response buffer */
		io->rspbuf.size = SCSI_RSP_BUF_LENGTH;
		io->rspbuf.virt = dma_alloc_coherent(&efct->pci->dev,
						     io->rspbuf.size,
						     &io->rspbuf.phys, GFP_DMA);
		if (!io->rspbuf.virt) {
			efc_log_err(efct, "dma_alloc rspbuf failed\n");
			efct_io_pool_free(io_pool);
			return NULL;
		}

		/* Allocate a zeroed SGL */
		io->sgl = kcalloc(num_sgl, sizeof(*io->sgl), GFP_KERNEL);
		if (!io->sgl) {
			efct_io_pool_free(io_pool);
			return NULL;
		}

		io->sgl_allocated = num_sgl;
		io->sgl_count = 0;

		INIT_LIST_HEAD(&io->list_entry);
		list_add_tail(&io->list_entry, &io_pool->freelist);
	}

	return io_pool;
}
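
/*
 * A minimal usage sketch; the caller-side field names are assumptions,
 * not taken from this file:
 *
 *	efct->xport->io_pool = efct_io_pool_create(efct, num_sgl);
 *	if (!efct->xport->io_pool)
 *		return -ENOMEM;
 */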
int
efct_io_pool_free(struct efct_io_pool *io_pool)
{
	struct efct *efct;
	u32 i;
	struct efct_io *io;

	if (io_pool) {
		efct = io_pool->efct;

		for (i = 0; i < io_pool->io_num_ios; i++) {
			io = io_pool->ios[i];
			if (!io)
				continue;

			kfree(io->sgl);
			dma_free_coherent(&efct->pci->dev,
					  io->rspbuf.size, io->rspbuf.virt,
					  io->rspbuf.phys);
			memset(&io->rspbuf, 0, sizeof(struct efc_dma));
		}

		kfree(io_pool);
		efct->xport->io_pool = NULL;
	}

	return 0;
}

struct efct_io *
efct_io_pool_io_alloc(struct efct_io_pool *io_pool)
{
	struct efct_io *io = NULL;
	struct efct *efct;
	unsigned long flags = 0;

	efct = io_pool->efct;

	spin_lock_irqsave(&io_pool->lock, flags);

	if (!list_empty(&io_pool->freelist)) {
		io = list_first_entry(&io_pool->freelist, struct efct_io,
				      list_entry);
		list_del_init(&io->list_entry);
	}

	spin_unlock_irqrestore(&io_pool->lock, flags);

	if (!io)
		return NULL;

	io->io_type = EFCT_IO_TYPE_MAX;
	io->hio_type = EFCT_HW_IO_MAX;
	io->hio = NULL;
	io->transferred = 0;
	io->efct = efct;
	io->timeout = 0;
	io->sgl_count = 0;
	io->tgt_task_tag = 0;
	io->init_task_tag = 0;
	io->hw_tag = 0;
	io->display_name = "pending";
	io->seq_init = 0;
	io->io_free = 0;
	io->release = NULL;
	atomic_add_return(1, &efct->xport->io_active_count);
	atomic_add_return(1, &efct->xport->io_total_alloc);
	return io;
}

/* Free an object used to track an IO */
void
efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io)
{
	struct efct *efct;
	struct efct_hw_io *hio = NULL;
	unsigned long flags = 0;

	efct = io_pool->efct;

	spin_lock_irqsave(&io_pool->lock, flags);
	hio = io->hio;
	io->hio = NULL;
	io->io_free = 1;
	INIT_LIST_HEAD(&io->list_entry);
	list_add(&io->list_entry, &io_pool->freelist);
	spin_unlock_irqrestore(&io_pool->lock, flags);

	if (hio)
		efct_hw_io_free(&efct->hw, hio);

	atomic_sub_return(1, &efct->xport->io_active_count);
	atomic_add_return(1, &efct->xport->io_total_free);
}
/* Find an I/O given its node and ox_id */
struct efct_io *
efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
		    u16 ox_id, u16 rx_id)
{
	struct efct_io *io = NULL;
	unsigned long flags = 0;
	bool found = false;

	spin_lock_irqsave(&node->active_ios_lock, flags);
	list_for_each_entry(io, &node->active_ios, list_entry) {
		if ((io->cmd_tgt && io->init_task_tag == ox_id) &&
		    (rx_id == 0xffff || io->tgt_task_tag == rx_id)) {
			if (kref_get_unless_zero(&io->ref))
				found = true;
			break;
		}
	}
	spin_unlock_irqrestore(&node->active_ios_lock, flags);
	return found ? io : NULL;
}
@ -0,0 +1,174 @@

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#if !defined(__EFCT_IO_H__)
#define __EFCT_IO_H__

#include "efct_lio.h"

#define EFCT_LOG_ENABLE_IO_ERRORS(efct)	\
		(((efct) != NULL) ? (((efct)->logmask & (1U << 6)) != 0) : 0)

#define io_error_log(io, fmt, ...)  \
	do { \
		if (EFCT_LOG_ENABLE_IO_ERRORS(io->efct)) \
			efc_log_warn(io->efct, fmt, ##__VA_ARGS__); \
	} while (0)

#define SCSI_CMD_BUF_LENGTH	48
#define SCSI_RSP_BUF_LENGTH	(FCP_RESP_WITH_EXT + SCSI_SENSE_BUFFERSIZE)
#define EFCT_NUM_SCSI_IOS	8192

enum efct_io_type {
	EFCT_IO_TYPE_IO = 0,
	EFCT_IO_TYPE_ELS,
	EFCT_IO_TYPE_CT,
	EFCT_IO_TYPE_CT_RESP,
	EFCT_IO_TYPE_BLS_RESP,
	EFCT_IO_TYPE_ABORT,

	EFCT_IO_TYPE_MAX,
};

enum efct_els_state {
	EFCT_ELS_REQUEST = 0,
	EFCT_ELS_REQUEST_DELAYED,
	EFCT_ELS_REQUEST_DELAY_ABORT,
	EFCT_ELS_REQ_ABORT,
	EFCT_ELS_REQ_ABORTED,
	EFCT_ELS_ABORT_IO_COMPL,
};
/**
 * SCSI target IO object
 * @efct: pointer back to efct
 * @instance_index: unique instance index value
 * @display_name: IO display name
 * @node: pointer to node
 * @list_entry: io list entry
 * @io_pending_link: io pending list entry
 * @ref: reference counter
 * @release: release callback function
 * @init_task_tag: initiator task tag (OX_ID) for back-end and SCSI logging
 * @tgt_task_tag: target task tag (RX_ID) for back-end and SCSI logging
 * @hw_tag: HW layer unique IO id
 * @tag: unique IO identifier
 * @sgl: SGL
 * @sgl_allocated: Number of allocated SGEs
 * @sgl_count: Number of SGEs in this SGL
 * @tgt_io: backend target private IO data
 * @exp_xfer_len: expected data transfer length, based on FC header
 * @hw_priv: Declarations private to HW/SLI
 * @io_type: indicates what this struct efct_io structure is used for
 * @hio: hw io object
 * @transferred: Number of bytes transferred
 * @auto_resp: set if auto_trsp was set
 * @low_latency: set if low latency request
 * @wq_steering: selected WQ steering request
 * @wq_class: selected WQ class if steering is class
 * @xfer_req: transfer size for current request
 * @scsi_tgt_cb: target callback function
 * @scsi_tgt_cb_arg: target callback function argument
 * @abort_cb: abort callback function
 * @abort_cb_arg: abort callback function argument
 * @bls_cb: BLS callback function
 * @bls_cb_arg: BLS callback function argument
 * @tmf_cmd: TMF command being processed
 * @abort_rx_id: rx_id from the ABTS that initiated the command abort
 * @cmd_tgt: True if this is a Target command
 * @send_abts: when aborting, indicates ABTS is to be sent
 * @cmd_ini: True if this is an Initiator command
 * @seq_init: True if local node has sequence initiative
 * @iparam: iparams for hw io send call
 * @hio_type: HW IO type
 * @wire_len: wire length
 * @hw_cb: saved HW callback
 * @io_to_abort: for abort handling, pointer to IO to abort
 * @rspbuf: SCSI Response buffer
 * @timeout: Timeout value in seconds for this IO
 * @cs_ctl: CS_CTL priority for this IO
 * @io_free: Is io object in freelist
 * @app_id: application id
 */
struct efct_io {
	struct efct *efct;
	u32 instance_index;
	const char *display_name;
	struct efct_node *node;

	struct list_head list_entry;
	struct list_head io_pending_link;
	struct kref ref;
	void (*release)(struct kref *arg);
	u32 init_task_tag;
	u32 tgt_task_tag;
	u32 hw_tag;
	u32 tag;
	struct efct_scsi_sgl *sgl;
	u32 sgl_allocated;
	u32 sgl_count;
	struct efct_scsi_tgt_io tgt_io;
	u32 exp_xfer_len;

	void *hw_priv;

	enum efct_io_type io_type;
	struct efct_hw_io *hio;
	size_t transferred;

	bool auto_resp;
	bool low_latency;
	u8 wq_steering;
	u8 wq_class;
	u64 xfer_req;
	efct_scsi_io_cb_t scsi_tgt_cb;
	void *scsi_tgt_cb_arg;
	efct_scsi_io_cb_t abort_cb;
	void *abort_cb_arg;
	efct_scsi_io_cb_t bls_cb;
	void *bls_cb_arg;
	enum efct_scsi_tmf_cmd tmf_cmd;
	u16 abort_rx_id;

	bool cmd_tgt;
	bool send_abts;
	bool cmd_ini;
	bool seq_init;
	union efct_hw_io_param_u iparam;
	enum efct_hw_io_type hio_type;
	u64 wire_len;
	void *hw_cb;

	struct efct_io *io_to_abort;

	struct efc_dma rspbuf;
	u32 timeout;
	u8 cs_ctl;
	u8 io_free;
	u32 app_id;
};

struct efct_io_cb_arg {
	int status;
	int ext_status;
	void *app;
};

struct efct_io_pool *
efct_io_pool_create(struct efct *efct, u32 num_sgl);
int
efct_io_pool_free(struct efct_io_pool *io_pool);
u32
efct_io_pool_allocated(struct efct_io_pool *io_pool);

struct efct_io *
efct_io_pool_io_alloc(struct efct_io_pool *io_pool);
void
efct_io_pool_io_free(struct efct_io_pool *io_pool, struct efct_io *io);
struct efct_io *
efct_io_find_tgt_io(struct efct *efct, struct efct_node *node,
		    u16 ox_id, u16 rx_id);
#endif /* __EFCT_IO_H__ */

File diff suppressed because it is too large
@ -0,0 +1,189 @@

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#ifndef __EFCT_LIO_H__
#define __EFCT_LIO_H__

#include "efct_scsi.h"
#include <target/target_core_base.h>

#define efct_lio_io_printf(io, fmt, ...) \
	efc_log_debug(io->efct, \
		      "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt, \
		      io->node->display_name, io->instance_index, \
		      io->init_task_tag, io->tgt_task_tag, io->hw_tag, \
		      ##__VA_ARGS__)

#define efct_lio_tmfio_printf(io, fmt, ...) \
	efc_log_debug(io->efct, \
		      "[%s] [%04x][i:%04x t:%04x h:%04x][f:%02x]" fmt, \
		      io->node->display_name, io->instance_index, \
		      io->init_task_tag, io->tgt_task_tag, io->hw_tag, \
		      io->tgt_io.tmf, ##__VA_ARGS__)

#define efct_set_lio_io_state(io, value) (io->tgt_io.state |= value)

struct efct_lio_wq_data {
	struct efct *efct;
	void *ptr;
	struct work_struct work;
};

/* Target private efct structure */
struct efct_scsi_tgt {
	u32 max_sge;
	u32 max_sgl;

	/*
	 * Variables used to send task set full. A high-watermark method is
	 * used: a fixed number of IOs is reserved per initiator, plus a
	 * fudge factor. Once that number is reached, the target starts
	 * sending task set full/busy responses.
	 */
	atomic_t initiator_count;
	atomic_t ios_in_use;
	atomic_t io_high_watermark;

	atomic_t watermark_hit;
	int watermark_min;
	int watermark_max;

	struct efct_lio_nport *lio_nport;
	struct efct_lio_tpg *tpg;

	struct list_head vport_list;
	/* Protects vport list */
	spinlock_t efct_lio_lock;

	u64 wwnn;
};

struct efct_scsi_tgt_nport {
	struct efct_lio_nport *lio_nport;
};

struct efct_node {
	struct list_head list_entry;
	struct kref ref;
	void (*release)(struct kref *arg);
	struct efct *efct;
	struct efc_node *node;
	struct se_session *session;
	spinlock_t active_ios_lock;
	struct list_head active_ios;
	char display_name[EFC_NAME_LENGTH];
	u32 port_fc_id;
	u32 node_fc_id;
	u32 vpi;
	u32 rpi;
	u32 abort_cnt;
};

#define EFCT_LIO_STATE_SCSI_RECV_CMD		(1 << 0)
#define EFCT_LIO_STATE_TGT_SUBMIT_CMD		(1 << 1)
#define EFCT_LIO_STATE_TFO_QUEUE_DATA_IN	(1 << 2)
#define EFCT_LIO_STATE_TFO_WRITE_PENDING	(1 << 3)
#define EFCT_LIO_STATE_TGT_EXECUTE_CMD		(1 << 4)
#define EFCT_LIO_STATE_SCSI_SEND_RD_DATA	(1 << 5)
#define EFCT_LIO_STATE_TFO_CHK_STOP_FREE	(1 << 6)
#define EFCT_LIO_STATE_SCSI_DATA_DONE		(1 << 7)
#define EFCT_LIO_STATE_TFO_QUEUE_STATUS		(1 << 8)
#define EFCT_LIO_STATE_SCSI_SEND_RSP		(1 << 9)
#define EFCT_LIO_STATE_SCSI_RSP_DONE		(1 << 10)
#define EFCT_LIO_STATE_TGT_GENERIC_FREE		(1 << 11)
#define EFCT_LIO_STATE_SCSI_RECV_TMF		(1 << 12)
#define EFCT_LIO_STATE_TGT_SUBMIT_TMR		(1 << 13)
#define EFCT_LIO_STATE_TFO_WRITE_PEND_STATUS	(1 << 14)
#define EFCT_LIO_STATE_TGT_GENERIC_REQ_FAILURE	(1 << 15)

#define EFCT_LIO_STATE_TFO_ABORTED_TASK		(1 << 29)
#define EFCT_LIO_STATE_TFO_RELEASE_CMD		(1 << 30)
#define EFCT_LIO_STATE_SCSI_CMPL_CMD		(1u << 31)
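
/*
 * These bits form a debug trail rather than a state machine:
 * efct_set_lio_io_state() above ORs one in at each lifecycle step, so a
 * stuck command's tgt_io.state shows how far it progressed (an inference
 * from the macro and the names, not from separate documentation).
 */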

struct efct_scsi_tgt_io {
	struct se_cmd cmd;
	unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
	enum dma_data_direction ddir;
	int task_attr;
	u64 lun;

	u32 state;
	u8 tmf;
	struct efct_io *io_to_abort;
	u32 seg_map_cnt;
	u32 seg_cnt;
	u32 cur_seg;
	enum efct_scsi_io_status err;
	bool aborting;
	bool rsp_sent;
	u32 transferred_len;
};

/* Handler return codes */
enum {
	SCSI_HANDLER_DATAPHASE_STARTED = 1,
	SCSI_HANDLER_RESP_STARTED,
	SCSI_HANDLER_VALIDATED_DATAPHASE_STARTED,
	SCSI_CMD_NOT_SUPPORTED,
};

#define WWN_NAME_LEN	32
struct efct_lio_vport {
	u64 wwpn;
	u64 npiv_wwpn;
	u64 npiv_wwnn;
	unsigned char wwpn_str[WWN_NAME_LEN];
	struct se_wwn vport_wwn;
	struct efct_lio_tpg *tpg;
	struct efct *efct;
	struct Scsi_Host *shost;
	struct fc_vport *fc_vport;
	atomic_t enable;
};

struct efct_lio_nport {
	u64 wwpn;
	unsigned char wwpn_str[WWN_NAME_LEN];
	struct se_wwn nport_wwn;
	struct efct_lio_tpg *tpg;
	struct efct *efct;
	atomic_t enable;
};

struct efct_lio_tpg_attrib {
	u32 generate_node_acls;
	u32 cache_dynamic_acls;
	u32 demo_mode_write_protect;
	u32 prod_mode_write_protect;
	u32 demo_mode_login_only;
	bool session_deletion_wait;
};

struct efct_lio_tpg {
	struct se_portal_group tpg;
	struct efct_lio_nport *nport;
	struct efct_lio_vport *vport;
	struct efct_lio_tpg_attrib tpg_attrib;
	unsigned short tpgt;
	bool enabled;
};

struct efct_lio_nacl {
	u64 nport_wwnn;
	char nport_name[WWN_NAME_LEN];
	struct se_session *session;
	struct se_node_acl se_node_acl;
};

struct efct_lio_vport_list_t {
	struct list_head list_entry;
	struct efct_lio_vport *lio_vport;
};

int efct_scsi_tgt_driver_init(void);
int efct_scsi_tgt_driver_exit(void);

#endif /* __EFCT_LIO_H__ */

File diff suppressed because it is too large
@ -0,0 +1,203 @@

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#if !defined(__EFCT_SCSI_H__)
#define __EFCT_SCSI_H__
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

/* efct_scsi_rcv_cmd() efct_scsi_rcv_tmf() flags */
#define EFCT_SCSI_CMD_DIR_IN		(1 << 0)
#define EFCT_SCSI_CMD_DIR_OUT		(1 << 1)
#define EFCT_SCSI_CMD_SIMPLE		(1 << 2)
#define EFCT_SCSI_CMD_HEAD_OF_QUEUE	(1 << 3)
#define EFCT_SCSI_CMD_ORDERED		(1 << 4)
#define EFCT_SCSI_CMD_UNTAGGED		(1 << 5)
#define EFCT_SCSI_CMD_ACA		(1 << 6)
#define EFCT_SCSI_FIRST_BURST_ERR	(1 << 7)
#define EFCT_SCSI_FIRST_BURST_ABORTED	(1 << 8)

/* efct_scsi_send_rd_data/recv_wr_data/send_resp flags */
#define EFCT_SCSI_LAST_DATAPHASE	(1 << 0)
#define EFCT_SCSI_NO_AUTO_RESPONSE	(1 << 1)
#define EFCT_SCSI_LOW_LATENCY		(1 << 2)

#define EFCT_SCSI_SNS_BUF_VALID(sense)	((sense) && \
			(0x70 == (((const u8 *)(sense))[0] & 0x70)))

#define EFCT_SCSI_WQ_STEERING_SHIFT	16
#define EFCT_SCSI_WQ_STEERING_MASK	(0xf << EFCT_SCSI_WQ_STEERING_SHIFT)
#define EFCT_SCSI_WQ_STEERING_CLASS	(0 << EFCT_SCSI_WQ_STEERING_SHIFT)
#define EFCT_SCSI_WQ_STEERING_REQUEST	(1 << EFCT_SCSI_WQ_STEERING_SHIFT)
#define EFCT_SCSI_WQ_STEERING_CPU	(2 << EFCT_SCSI_WQ_STEERING_SHIFT)

#define EFCT_SCSI_WQ_CLASS_SHIFT	(20)
#define EFCT_SCSI_WQ_CLASS_MASK		(0xf << EFCT_SCSI_WQ_CLASS_SHIFT)
#define EFCT_SCSI_WQ_CLASS(x)		((x & EFCT_SCSI_WQ_CLASS_MASK) << \
					EFCT_SCSI_WQ_CLASS_SHIFT)

#define EFCT_SCSI_WQ_CLASS_LOW_LATENCY	1
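
/*
 * Flags-word layout implied by the definitions above: bits 16..19 carry
 * the WQ steering selection (class / request / CPU) and bits 20..23 the
 * WQ class; the low bits remain free for the EFCT_SCSI_* request flags.
 */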

struct efct_scsi_cmd_resp {
	u8 scsi_status;
	u16 scsi_status_qualifier;
	u8 *response_data;
	u32 response_data_length;
	u8 *sense_data;
	u32 sense_data_length;
	int residual;
	u32 response_wire_length;
};

struct efct_vport {
	struct efct *efct;
	bool is_vport;
	struct fc_host_statistics fc_host_stats;
	struct Scsi_Host *shost;
	struct fc_vport *fc_vport;
	u64 npiv_wwpn;
	u64 npiv_wwnn;
};

/* Status values returned by IO callbacks */
enum efct_scsi_io_status {
	EFCT_SCSI_STATUS_GOOD = 0,
	EFCT_SCSI_STATUS_ABORTED,
	EFCT_SCSI_STATUS_ERROR,
	EFCT_SCSI_STATUS_DIF_GUARD_ERR,
	EFCT_SCSI_STATUS_DIF_REF_TAG_ERROR,
	EFCT_SCSI_STATUS_DIF_APP_TAG_ERROR,
	EFCT_SCSI_STATUS_DIF_UNKNOWN_ERROR,
	EFCT_SCSI_STATUS_PROTOCOL_CRC_ERROR,
	EFCT_SCSI_STATUS_NO_IO,
	EFCT_SCSI_STATUS_ABORT_IN_PROGRESS,
	EFCT_SCSI_STATUS_CHECK_RESPONSE,
	EFCT_SCSI_STATUS_COMMAND_TIMEOUT,
	EFCT_SCSI_STATUS_TIMEDOUT_AND_ABORTED,
	EFCT_SCSI_STATUS_SHUTDOWN,
	EFCT_SCSI_STATUS_NEXUS_LOST,
};

struct efct_node;
struct efct_io;
struct efc_node;
struct efc_nport;

/* Callback used by send_rd_data(), recv_wr_data(), send_resp() */
typedef int (*efct_scsi_io_cb_t)(struct efct_io *io,
				 enum efct_scsi_io_status status,
				 u32 flags, void *arg);

/* Callback used by send_rd_io(), send_wr_io() */
typedef int (*efct_scsi_rsp_io_cb_t)(struct efct_io *io,
				     enum efct_scsi_io_status status,
				     struct efct_scsi_cmd_resp *rsp,
				     u32 flags, void *arg);

/* efct_scsi_cb_t flags */
#define EFCT_SCSI_IO_CMPL		(1 << 0)
/* IO completed, response sent */
#define EFCT_SCSI_IO_CMPL_RSP_SENT	(1 << 1)
#define EFCT_SCSI_IO_ABORTED		(1 << 2)

/* efct_scsi_recv_tmf() request values */
enum efct_scsi_tmf_cmd {
	EFCT_SCSI_TMF_ABORT_TASK = 1,
	EFCT_SCSI_TMF_QUERY_TASK_SET,
	EFCT_SCSI_TMF_ABORT_TASK_SET,
	EFCT_SCSI_TMF_CLEAR_TASK_SET,
	EFCT_SCSI_TMF_QUERY_ASYNCHRONOUS_EVENT,
	EFCT_SCSI_TMF_LOGICAL_UNIT_RESET,
	EFCT_SCSI_TMF_CLEAR_ACA,
	EFCT_SCSI_TMF_TARGET_RESET,
};

/* efct_scsi_send_tmf_resp() response values */
enum efct_scsi_tmf_resp {
	EFCT_SCSI_TMF_FUNCTION_COMPLETE = 1,
	EFCT_SCSI_TMF_FUNCTION_SUCCEEDED,
	EFCT_SCSI_TMF_FUNCTION_IO_NOT_FOUND,
	EFCT_SCSI_TMF_FUNCTION_REJECTED,
	EFCT_SCSI_TMF_INCORRECT_LOGICAL_UNIT_NUMBER,
	EFCT_SCSI_TMF_SERVICE_DELIVERY,
};

struct efct_scsi_sgl {
	uintptr_t addr;
	uintptr_t dif_addr;
	size_t len;
};

enum efct_scsi_io_role {
	EFCT_SCSI_IO_ROLE_ORIGINATOR,
	EFCT_SCSI_IO_ROLE_RESPONDER,
};

struct efct_io *
efct_scsi_io_alloc(struct efct_node *node);
void efct_scsi_io_free(struct efct_io *io);
struct efct_io *efct_io_get_instance(struct efct *efct, u32 index);

int efct_scsi_tgt_driver_init(void);
int efct_scsi_tgt_driver_exit(void);
int efct_scsi_tgt_new_device(struct efct *efct);
int efct_scsi_tgt_del_device(struct efct *efct);
int
efct_scsi_tgt_new_nport(struct efc *efc, struct efc_nport *nport);
void
efct_scsi_tgt_del_nport(struct efc *efc, struct efc_nport *nport);

int
efct_scsi_new_initiator(struct efc *efc, struct efc_node *node);

enum efct_scsi_del_initiator_reason {
	EFCT_SCSI_INITIATOR_DELETED,
	EFCT_SCSI_INITIATOR_MISSING,
};

int
efct_scsi_del_initiator(struct efc *efc, struct efc_node *node, int reason);
void
efct_scsi_recv_cmd(struct efct_io *io, uint64_t lun, u8 *cdb, u32 cdb_len,
		   u32 flags);
int
efct_scsi_recv_tmf(struct efct_io *tmfio, u32 lun, enum efct_scsi_tmf_cmd cmd,
		   struct efct_io *abortio, u32 flags);
int
efct_scsi_send_rd_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
		       u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
int
efct_scsi_recv_wr_data(struct efct_io *io, u32 flags, struct efct_scsi_sgl *sgl,
		       u32 sgl_count, u64 wire_len, efct_scsi_io_cb_t cb, void *arg);
int
efct_scsi_send_resp(struct efct_io *io, u32 flags,
		    struct efct_scsi_cmd_resp *rsp, efct_scsi_io_cb_t cb, void *arg);
int
efct_scsi_send_tmf_resp(struct efct_io *io, enum efct_scsi_tmf_resp rspcode,
			u8 addl_rsp_info[3], efct_scsi_io_cb_t cb, void *arg);
int
efct_scsi_tgt_abort_io(struct efct_io *io, efct_scsi_io_cb_t cb, void *arg);

void efct_scsi_io_complete(struct efct_io *io);

int efct_scsi_reg_fc_transport(void);
void efct_scsi_release_fc_transport(void);
int efct_scsi_new_device(struct efct *efct);
void efct_scsi_del_device(struct efct *efct);
void _efct_scsi_io_free(struct kref *arg);

int
efct_scsi_del_vport(struct efct *efct, struct Scsi_Host *shost);
struct efct_vport *
efct_scsi_new_vport(struct efct *efct, struct device *dev);

int efct_scsi_io_dispatch(struct efct_io *io, void *cb);
int efct_scsi_io_dispatch_abort(struct efct_io *io, void *cb);
void efct_scsi_check_pending(struct efct *efct);
struct efct_io *
efct_bls_send_rjt(struct efct_io *io, struct fc_frame_header *hdr);

#endif /* __EFCT_SCSI_H__ */

@ -0,0 +1,492 @@

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efct_driver.h"
#include "efct_unsol.h"

#define frame_printf(efct, hdr, fmt, ...) \
	do { \
		char s_id_text[16]; \
		efc_node_fcid_display(ntoh24((hdr)->fh_s_id), \
				      s_id_text, sizeof(s_id_text)); \
		efc_log_debug(efct, "[%06x.%s] %02x/%04x/%04x: " fmt, \
			      ntoh24((hdr)->fh_d_id), s_id_text, \
			      (hdr)->fh_r_ctl, be16_to_cpu((hdr)->fh_ox_id), \
			      be16_to_cpu((hdr)->fh_rx_id), ##__VA_ARGS__); \
	} while (0)

static struct efct_node *
efct_node_find(struct efct *efct, u32 port_id, u32 node_id)
{
	struct efct_node *node;
	u64 id = (u64)port_id << 32 | node_id;

	/*
	 * During node shutdown, the lookup entry is removed first, before
	 * the backend is notified, so no new IOs will be allowed.
	 */
	/* Find a target node, given s_id and d_id */
	node = xa_load(&efct->lookup, id);
	if (node)
		kref_get(&node->ref);

	return node;
}
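
/*
 * The xarray key packs the local port FC ID into the upper 32 bits and
 * the remote node FC ID into the lower 32 bits; callers below pass the
 * received frame's (d_id, s_id), so the key names the (local port,
 * remote initiator) pair.
 */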

static int
efct_dispatch_frame(struct efct *efct, struct efc_hw_sequence *seq)
{
	struct efct_node *node;
	struct fc_frame_header *hdr;
	u32 s_id, d_id;

	hdr = seq->header->dma.virt;

	/* extract the s_id and d_id */
	s_id = ntoh24(hdr->fh_s_id);
	d_id = ntoh24(hdr->fh_d_id);

	if (!(hdr->fh_type == FC_TYPE_FCP || hdr->fh_type == FC_TYPE_BLS))
		return -EIO;

	if (hdr->fh_type == FC_TYPE_FCP) {
		node = efct_node_find(efct, d_id, s_id);
		if (!node) {
			efc_log_err(efct,
				    "Node not found, drop cmd d_id:%x s_id:%x\n",
				    d_id, s_id);
			efct_hw_sequence_free(&efct->hw, seq);
			return 0;
		}

		efct_dispatch_fcp_cmd(node, seq);
	} else {
		node = efct_node_find(efct, d_id, s_id);
		if (!node) {
			efc_log_err(efct, "ABTS: Node not found, d_id:%x s_id:%x\n",
				    d_id, s_id);
			return -EIO;
		}

		efc_log_err(efct, "Received ABTS for Node:%p\n", node);
		efct_node_recv_abts_frame(node, seq);
	}

	kref_put(&node->ref, node->release);
	efct_hw_sequence_free(&efct->hw, seq);
	return 0;
}

int
efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq)
{
	struct efct *efct = arg;

	/* Process FCP command */
	if (!efct_dispatch_frame(efct, seq))
		return 0;

	/* Forward frame to discovery lib */
	efc_dispatch_frame(efct->efcport, seq);
	return 0;
}

static int
efct_fc_tmf_rejected_cb(struct efct_io *io,
			enum efct_scsi_io_status scsi_status,
			u32 flags, void *arg)
{
	efct_scsi_io_free(io);
	return 0;
}

static void
efct_dispatch_unsol_tmf(struct efct_io *io, u8 tm_flags, u32 lun)
{
	u32 i;
	struct {
		u32 mask;
		enum efct_scsi_tmf_cmd cmd;
	} tmflist[] = {
		{FCP_TMF_ABT_TASK_SET, EFCT_SCSI_TMF_ABORT_TASK_SET},
		{FCP_TMF_CLR_TASK_SET, EFCT_SCSI_TMF_CLEAR_TASK_SET},
		{FCP_TMF_LUN_RESET, EFCT_SCSI_TMF_LOGICAL_UNIT_RESET},
		{FCP_TMF_TGT_RESET, EFCT_SCSI_TMF_TARGET_RESET},
		{FCP_TMF_CLR_ACA, EFCT_SCSI_TMF_CLEAR_ACA} };

	io->exp_xfer_len = 0;

	for (i = 0; i < ARRAY_SIZE(tmflist); i++) {
		if (tmflist[i].mask & tm_flags) {
			io->tmf_cmd = tmflist[i].cmd;
			efct_scsi_recv_tmf(io, lun, tmflist[i].cmd, NULL, 0);
			break;
		}
	}
	if (i == ARRAY_SIZE(tmflist)) {
		/* Not handled */
		efc_log_err(io->node->efct, "TMF x%x rejected\n", tm_flags);
		efct_scsi_send_tmf_resp(io, EFCT_SCSI_TMF_FUNCTION_REJECTED,
					NULL, efct_fc_tmf_rejected_cb, NULL);
	}
}

static int
efct_validate_fcp_cmd(struct efct *efct, struct efc_hw_sequence *seq)
{
	/*
	 * If we received less than FCP_CMND_IU bytes, assume that the frame
	 * is corrupted in some way and drop it. This was seen when jamming
	 * the FCTL fill bytes field.
	 */
	if (seq->payload->dma.len < sizeof(struct fcp_cmnd)) {
		struct fc_frame_header *fchdr = seq->header->dma.virt;

		efc_log_debug(efct,
			      "drop ox_id %04x payload (%zd) less than (%zd)\n",
			      be16_to_cpu(fchdr->fh_ox_id),
			      seq->payload->dma.len, sizeof(struct fcp_cmnd));
		return -EIO;
	}
	return 0;
}

static void
efct_populate_io_fcp_cmd(struct efct_io *io, struct fcp_cmnd *cmnd,
			 struct fc_frame_header *fchdr, bool sit)
{
	io->init_task_tag = be16_to_cpu(fchdr->fh_ox_id);
	/* note, tgt_task_tag, hw_tag set when HW io is allocated */
	io->exp_xfer_len = be32_to_cpu(cmnd->fc_dl);
	io->transferred = 0;

	/*
	 * The upper 7 bits of CS_CTL are the frame priority through the SAN.
	 * Our assertion here is that the priority given to a frame containing
	 * the FCP cmd should be the priority given to ALL frames contained
	 * in that IO. Thus we need to save the incoming CS_CTL here.
	 */
	if (ntoh24(fchdr->fh_f_ctl) & FC_FC_RES_B17)
		io->cs_ctl = fchdr->fh_cs_ctl;
	else
		io->cs_ctl = 0;

	io->seq_init = sit;
}

static u32
efct_get_flags_fcp_cmd(struct fcp_cmnd *cmnd)
{
	u32 flags = 0;

	switch (cmnd->fc_pri_ta & FCP_PTA_MASK) {
	case FCP_PTA_SIMPLE:
		flags |= EFCT_SCSI_CMD_SIMPLE;
		break;
	case FCP_PTA_HEADQ:
		flags |= EFCT_SCSI_CMD_HEAD_OF_QUEUE;
		break;
	case FCP_PTA_ORDERED:
		flags |= EFCT_SCSI_CMD_ORDERED;
		break;
	case FCP_PTA_ACA:
		flags |= EFCT_SCSI_CMD_ACA;
		break;
	}
	if (cmnd->fc_flags & FCP_CFL_WRDATA)
		flags |= EFCT_SCSI_CMD_DIR_IN;
	if (cmnd->fc_flags & FCP_CFL_RDDATA)
		flags |= EFCT_SCSI_CMD_DIR_OUT;

	return flags;
}
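
/*
 * Direction note on the mapping above: the flags are from the target's
 * point of view, so an initiator write (FCP_CFL_WRDATA) becomes
 * EFCT_SCSI_CMD_DIR_IN (data flows into the target) and an initiator
 * read (FCP_CFL_RDDATA) becomes EFCT_SCSI_CMD_DIR_OUT.
 */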
|
||||
static void
|
||||
efct_sframe_common_send_cb(void *arg, u8 *cqe, int status)
|
||||
{
|
||||
struct efct_hw_send_frame_context *ctx = arg;
|
||||
struct efct_hw *hw = ctx->hw;
|
||||
|
||||
/* Free WQ completion callback */
|
||||
efct_hw_reqtag_free(hw, ctx->wqcb);
|
||||
|
||||
/* Free sequence */
|
||||
efct_hw_sequence_free(hw, ctx->seq);
|
||||
}
|
||||
|
||||
static int
|
||||
efct_sframe_common_send(struct efct_node *node,
|
||||
struct efc_hw_sequence *seq,
|
||||
enum fc_rctl r_ctl, u32 f_ctl,
|
||||
u8 type, void *payload, u32 payload_len)
|
||||
{
|
||||
struct efct *efct = node->efct;
|
||||
struct efct_hw *hw = &efct->hw;
|
||||
int rc = 0;
|
||||
struct fc_frame_header *req_hdr = seq->header->dma.virt;
|
||||
struct fc_frame_header hdr;
|
||||
struct efct_hw_send_frame_context *ctx;
|
||||
|
||||
u32 heap_size = seq->payload->dma.size;
|
||||
uintptr_t heap_phys_base = seq->payload->dma.phys;
|
||||
u8 *heap_virt_base = seq->payload->dma.virt;
|
||||
u32 heap_offset = 0;
|
||||
|
||||
/* Build the FC header reusing the RQ header DMA buffer */
|
||||
memset(&hdr, 0, sizeof(hdr));
|
||||
hdr.fh_r_ctl = r_ctl;
|
||||
/* send it back to whomever sent it to us */
|
||||
memcpy(hdr.fh_d_id, req_hdr->fh_s_id, sizeof(hdr.fh_d_id));
|
||||
memcpy(hdr.fh_s_id, req_hdr->fh_d_id, sizeof(hdr.fh_s_id));
|
||||
hdr.fh_type = type;
|
||||
hton24(hdr.fh_f_ctl, f_ctl);
|
||||
hdr.fh_ox_id = req_hdr->fh_ox_id;
|
||||
hdr.fh_rx_id = req_hdr->fh_rx_id;
|
||||
hdr.fh_cs_ctl = 0;
|
||||
hdr.fh_df_ctl = 0;
|
||||
hdr.fh_seq_cnt = 0;
|
||||
hdr.fh_parm_offset = 0;
|
||||
|
||||
/*
|
||||
* send_frame_seq_id is an atomic, we just let it increment,
|
||||
* while storing only the low 8 bits to hdr->seq_id
|
||||
*/
|
||||
hdr.fh_seq_id = (u8)atomic_add_return(1, &hw->send_frame_seq_id);
|
||||
hdr.fh_seq_id--;
|
||||
|
||||
/* Allocate and fill in the send frame request context */
|
||||
ctx = (void *)(heap_virt_base + heap_offset);
|
||||
heap_offset += sizeof(*ctx);
|
||||
if (heap_offset > heap_size) {
|
||||
efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
|
||||
heap_offset, heap_size);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
memset(ctx, 0, sizeof(*ctx));
|
||||
|
||||
/* Save sequence */
|
||||
ctx->seq = seq;
|
||||
|
||||
/* Allocate a response payload DMA buffer from the heap */
|
||||
ctx->payload.phys = heap_phys_base + heap_offset;
|
||||
ctx->payload.virt = heap_virt_base + heap_offset;
|
||||
ctx->payload.size = payload_len;
|
||||
ctx->payload.len = payload_len;
|
||||
heap_offset += payload_len;
|
||||
if (heap_offset > heap_size) {
|
||||
efc_log_err(efct, "Fill send frame failed offset %d size %d\n",
|
||||
heap_offset, heap_size);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
/* Copy the payload in */
|
||||
memcpy(ctx->payload.virt, payload, payload_len);
|
||||
|
||||
/* Send */
|
||||
rc = efct_hw_send_frame(&efct->hw, (void *)&hdr, FC_SOF_N3,
|
||||
FC_EOF_T, &ctx->payload, ctx,
|
||||
efct_sframe_common_send_cb, ctx);
|
||||
if (rc)
|
||||
efc_log_debug(efct, "efct_hw_send_frame failed: %d\n", rc);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int
|
||||
efct_sframe_send_fcp_rsp(struct efct_node *node, struct efc_hw_sequence *seq,
|
||||
void *rsp, u32 rsp_len)
|
||||
{
|
||||
return efct_sframe_common_send(node, seq, FC_RCTL_DD_CMD_STATUS,
|
||||
FC_FC_EX_CTX |
|
||||
FC_FC_LAST_SEQ |
|
||||
FC_FC_END_SEQ |
|
||||
FC_FC_SEQ_INIT,
|
||||
FC_TYPE_FCP,
|
||||
rsp, rsp_len);
|
||||
}
|
||||
|
||||
static int
|
||||
efct_sframe_send_task_set_full_or_busy(struct efct_node *node,
|
||||
struct efc_hw_sequence *seq)
|
||||
{
|
||||
struct fcp_resp_with_ext fcprsp;
|
||||
struct fcp_cmnd *fcpcmd = seq->payload->dma.virt;
|
||||
int rc = 0;
|
||||
unsigned long flags = 0;
|
||||
struct efct *efct = node->efct;
|
||||
|
||||
/* construct task set full or busy response */
|
||||
memset(&fcprsp, 0, sizeof(fcprsp));
|
||||
spin_lock_irqsave(&node->active_ios_lock, flags);
|
||||
fcprsp.resp.fr_status = list_empty(&node->active_ios) ?
|
||||
SAM_STAT_BUSY : SAM_STAT_TASK_SET_FULL;
|
||||
spin_unlock_irqrestore(&node->active_ios_lock, flags);
|
||||
*((u32 *)&fcprsp.ext.fr_resid) = be32_to_cpu(fcpcmd->fc_dl);
|
||||
|
||||
/* send it using send_frame */
|
||||
rc = efct_sframe_send_fcp_rsp(node, seq, &fcprsp, sizeof(fcprsp));
|
||||
if (rc)
|
||||
efc_log_debug(efct, "efct_sframe_send_fcp_rsp failed %d\n", rc);
|
||||
|
||||
return rc;
|
||||
}
|
||||

int
efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq)
{
	struct efct *efct = node->efct;
	struct fc_frame_header *fchdr = seq->header->dma.virt;
	struct fcp_cmnd *cmnd = NULL;
	struct efct_io *io = NULL;
	u32 lun;

	if (!seq->payload) {
		efc_log_err(efct, "Sequence payload is NULL.\n");
		return -EIO;
	}

	cmnd = seq->payload->dma.virt;

	/* perform FCP_CMND validation check(s) */
	if (efct_validate_fcp_cmd(efct, seq))
		return -EIO;

	lun = scsilun_to_int(&cmnd->fc_lun);
	if (lun == U32_MAX)
		return -EIO;

	io = efct_scsi_io_alloc(node);
	if (!io) {
		int rc;

		/* Use SEND_FRAME to send task set full or busy */
		rc = efct_sframe_send_task_set_full_or_busy(node, seq);
		if (rc)
			efc_log_err(efct, "Failed to send busy task: %d\n", rc);

		return rc;
	}

	io->hw_priv = seq->hw_priv;

	io->app_id = 0;

	/* RQ pair, if we got here, SIT=1 */
	efct_populate_io_fcp_cmd(io, cmnd, fchdr, true);

	if (cmnd->fc_tm_flags) {
		efct_dispatch_unsol_tmf(io, cmnd->fc_tm_flags, lun);
	} else {
		u32 flags = efct_get_flags_fcp_cmd(cmnd);

		if (cmnd->fc_flags & FCP_CFL_LEN_MASK) {
			efc_log_err(efct, "Additional CDB not supported\n");
			return -EIO;
		}
		/*
		 * Can return failure for things like task set full and UAs,
		 * no need to treat as a dropped frame if rc != 0
		 */
		efct_scsi_recv_cmd(io, lun, cmnd->fc_cdb,
				   sizeof(cmnd->fc_cdb), flags);
	}

	return 0;
}

static int
efct_process_abts(struct efct_io *io, struct fc_frame_header *hdr)
{
	struct efct_node *node = io->node;
	struct efct *efct = io->efct;
	u16 ox_id = be16_to_cpu(hdr->fh_ox_id);
	u16 rx_id = be16_to_cpu(hdr->fh_rx_id);
	struct efct_io *abortio;

	/* Find IO and attempt to take a reference on it */
	abortio = efct_io_find_tgt_io(efct, node, ox_id, rx_id);

	if (abortio) {
		/* Got a reference on the IO. Hold it until backend
		 * is notified below
		 */
		efc_log_info(node->efct, "Abort ox_id [%04x] rx_id [%04x]\n",
			     ox_id, rx_id);

		/*
		 * Save the ox_id for the ABTS as the init_task_tag in our
		 * manufactured TMF IO object
		 */
		io->display_name = "abts";
		io->init_task_tag = ox_id;
		/* don't set tgt_task_tag, don't want to confuse with XRI */

		/*
		 * Save the rx_id from the ABTS as it is needed for the
		 * BLS response, regardless of the IO context's rx_id
		 */
		io->abort_rx_id = rx_id;

		/* Call target server command abort */
		io->tmf_cmd = EFCT_SCSI_TMF_ABORT_TASK;
		efct_scsi_recv_tmf(io, abortio->tgt_io.lun,
				   EFCT_SCSI_TMF_ABORT_TASK, abortio, 0);

		/*
		 * Backend will have taken an additional reference on the
		 * IO if needed; done with current reference.
		 */
		kref_put(&abortio->ref, abortio->release);
	} else {
		/*
		 * Either the IO was not found, or it was freed between
		 * finding it and attempting to take the reference.
		 */
		efc_log_info(node->efct, "Abort: ox_id [%04x], IO not found\n",
			     ox_id);

		/* Send a BA_RJT */
		efct_bls_send_rjt(io, hdr);
	}
	return 0;
}

int
efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq)
{
	struct efct *efct = node->efct;
	struct fc_frame_header *hdr = seq->header->dma.virt;
	struct efct_io *io = NULL;

	node->abort_cnt++;
	io = efct_scsi_io_alloc(node);
	if (io) {
		io->hw_priv = seq->hw_priv;
		/* If we got this far, SIT=1 */
		io->seq_init = 1;

		/* fill out generic fields */
		io->efct = efct;
		io->node = node;
		io->cmd_tgt = true;

		efct_process_abts(io, seq->header->dma.virt);
	} else {
		efc_log_err(efct,
			    "SCSI IO allocation failed for ABTS received ");
		efc_log_err(efct, "s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
			    ntoh24(hdr->fh_s_id), ntoh24(hdr->fh_d_id),
			    be16_to_cpu(hdr->fh_ox_id),
			    be16_to_cpu(hdr->fh_rx_id));
	}

	return 0;
}
@@ -0,0 +1,17 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#if !defined(__OSC_UNSOL_H__)
#define __OSC_UNSOL_H__

int
efct_unsolicited_cb(void *arg, struct efc_hw_sequence *seq);
int
efct_dispatch_fcp_cmd(struct efct_node *node, struct efc_hw_sequence *seq);
int
efct_node_recv_abts_frame(struct efct_node *node, struct efc_hw_sequence *seq);

#endif /* __OSC_UNSOL_H__ */
File diff suppressed because it is too large
@@ -0,0 +1,186 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#if !defined(__EFCT_XPORT_H__)
#define __EFCT_XPORT_H__

enum efct_xport_ctrl {
	EFCT_XPORT_PORT_ONLINE = 1,
	EFCT_XPORT_PORT_OFFLINE,
	EFCT_XPORT_SHUTDOWN,
	EFCT_XPORT_POST_NODE_EVENT,
	EFCT_XPORT_WWNN_SET,
	EFCT_XPORT_WWPN_SET,
};

enum efct_xport_status {
	EFCT_XPORT_PORT_STATUS,
	EFCT_XPORT_CONFIG_PORT_STATUS,
	EFCT_XPORT_LINK_SPEED,
	EFCT_XPORT_IS_SUPPORTED_LINK_SPEED,
	EFCT_XPORT_LINK_STATISTICS,
	EFCT_XPORT_LINK_STAT_RESET,
	EFCT_XPORT_IS_QUIESCED
};

struct efct_xport_link_stats {
	bool rec;
	bool gec;
	bool w02of;
	bool w03of;
	bool w04of;
	bool w05of;
	bool w06of;
	bool w07of;
	bool w08of;
	bool w09of;
	bool w10of;
	bool w11of;
	bool w12of;
	bool w13of;
	bool w14of;
	bool w15of;
	bool w16of;
	bool w17of;
	bool w18of;
	bool w19of;
	bool w20of;
	bool w21of;
	bool clrc;
	bool clof1;
	u32 link_failure_error_count;
	u32 loss_of_sync_error_count;
	u32 loss_of_signal_error_count;
	u32 primitive_sequence_error_count;
	u32 invalid_transmission_word_error_count;
	u32 crc_error_count;
	u32 primitive_sequence_event_timeout_count;
	u32 elastic_buffer_overrun_error_count;
	u32 arbitration_fc_al_timeout_count;
	u32 advertised_receive_buffer_to_buffer_credit;
	u32 current_receive_buffer_to_buffer_credit;
	u32 advertised_transmit_buffer_to_buffer_credit;
	u32 current_transmit_buffer_to_buffer_credit;
	u32 received_eofa_count;
	u32 received_eofdti_count;
	u32 received_eofni_count;
	u32 received_soff_count;
	u32 received_dropped_no_aer_count;
	u32 received_dropped_no_available_rpi_resources_count;
	u32 received_dropped_no_available_xri_resources_count;
};

struct efct_xport_host_stats {
	bool cc;
	u32 transmit_kbyte_count;
	u32 receive_kbyte_count;
	u32 transmit_frame_count;
	u32 receive_frame_count;
	u32 transmit_sequence_count;
	u32 receive_sequence_count;
	u32 total_exchanges_originator;
	u32 total_exchanges_responder;
	u32 receive_p_bsy_count;
	u32 receive_f_bsy_count;
	u32 dropped_frames_due_to_no_rq_buffer_count;
	u32 empty_rq_timeout_count;
	u32 dropped_frames_due_to_no_xri_count;
	u32 empty_xri_pool_count;
};

struct efct_xport_host_statistics {
	struct completion done;
	struct efct_xport_link_stats link_stats;
	struct efct_xport_host_stats host_stats;
};

union efct_xport_stats_u {
	u32 value;
	struct efct_xport_host_statistics stats;
};

struct efct_xport_fcp_stats {
	u64 input_bytes;
	u64 output_bytes;
	u64 input_requests;
	u64 output_requests;
	u64 control_requests;
};

struct efct_xport {
	struct efct *efct;
	/* wwpn requested by user for primary nport */
	u64 req_wwpn;
	/* wwnn requested by user for primary nport */
	u64 req_wwnn;

	/* Nodes */
	/* number of allocated nodes */
	u32 nodes_count;
	/* used to track how often the IO pool is empty */
	atomic_t io_alloc_failed_count;
	/* array of pointers to nodes */
	struct efc_node **nodes;

	/* IO pool and counts */
	/* pointer to IO pool */
	struct efct_io_pool *io_pool;
	/* lock for io_pending_list */
	spinlock_t io_pending_lock;
	/* list of IOs waiting for HW resources
	 * lock: xport->io_pending_lock
	 * link: efct_io_s->io_pending_link
	 */
	struct list_head io_pending_list;
	/* count of total IOs allocated */
	atomic_t io_total_alloc;
	/* count of total IOs freed */
	atomic_t io_total_free;
	/* count of total IOs that were pended */
	atomic_t io_total_pending;
	/* count of active IOs */
	atomic_t io_active_count;
	/* count of pending IOs */
	atomic_t io_pending_count;
	/* non-zero if efct_scsi_check_pending is executing */
	atomic_t io_pending_recursing;

	/* Port */
	/* requested link state */
	u32 configured_link_state;

	/* Timer for Statistics */
	struct timer_list stats_timer;
	union efct_xport_stats_u fc_xport_stats;
	struct efct_xport_fcp_stats fcp_stats;
};

struct efct_rport_data {
	struct efc_node *node;
};

struct efct_xport *
efct_xport_alloc(struct efct *efct);
int
efct_xport_attach(struct efct_xport *xport);
int
efct_xport_initialize(struct efct_xport *xport);
void
efct_xport_detach(struct efct_xport *xport);
int
efct_xport_control(struct efct_xport *xport, enum efct_xport_ctrl cmd, ...);
int
efct_xport_status(struct efct_xport *xport, enum efct_xport_status cmd,
		  union efct_xport_stats_u *result);
void
efct_xport_free(struct efct_xport *xport);

struct scsi_transport_template *efct_attach_fc_transport(void);
struct scsi_transport_template *efct_attach_vport_fc_transport(void);
void
efct_release_fc_transport(struct scsi_transport_template *transport_template);

#endif /* __EFCT_XPORT_H__ */
@@ -0,0 +1,37 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#ifndef __EFC_COMMON_H__
#define __EFC_COMMON_H__

#include <linux/pci.h>

struct efc_dma {
	void *virt;
	void *alloc;
	dma_addr_t phys;

	size_t size;
	size_t len;
	struct pci_dev *pdev;
};

#define efc_log_crit(efc, fmt, args...) \
	dev_crit(&((efc)->pci)->dev, fmt, ##args)

#define efc_log_err(efc, fmt, args...) \
	dev_err(&((efc)->pci)->dev, fmt, ##args)

#define efc_log_warn(efc, fmt, args...) \
	dev_warn(&((efc)->pci)->dev, fmt, ##args)

#define efc_log_info(efc, fmt, args...) \
	dev_info(&((efc)->pci)->dev, fmt, ##args)

#define efc_log_debug(efc, fmt, args...) \
	dev_dbg(&((efc)->pci)->dev, fmt, ##args)

#endif /* __EFC_COMMON_H__ */
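Each efc_log_* wrapper above forwards to the matching dev_* printk helper against the adapter's PCI device. A userspace analogue of the same variadic-macro forwarding pattern; the ctx struct and names below are hypothetical, not driver symbols:

#include <stdio.h>

/*
 * A family of variadic macros that prepend a context (here a name
 * string, in the driver the PCI device) and forward format and
 * arguments, using the same GNU ##args extension the driver uses.
 */
struct ctx {
	const char *name;
};

#define ctx_log_err(c, fmt, args...) \
	fprintf(stderr, "%s: error: " fmt, (c)->name, ##args)

#define ctx_log_info(c, fmt, args...) \
	fprintf(stdout, "%s: info: " fmt, (c)->name, ##args)

int main(void)
{
	struct ctx c = { .name = "efc0" };

	ctx_log_info(&c, "link %s\n", "up");
	ctx_log_err(&c, "mailbox timeout after %d ms\n", 250);
	return 0;
}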
@@ -0,0 +1,52 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#ifndef __EFC_H__
#define __EFC_H__

#include "../include/efc_common.h"
#include "efclib.h"
#include "efc_sm.h"
#include "efc_cmds.h"
#include "efc_domain.h"
#include "efc_nport.h"
#include "efc_node.h"
#include "efc_fabric.h"
#include "efc_device.h"
#include "efc_els.h"

#define EFC_MAX_REMOTE_NODES	2048
#define NODE_SPARAMS_SIZE	256

enum efc_scsi_del_initiator_reason {
	EFC_SCSI_INITIATOR_DELETED,
	EFC_SCSI_INITIATOR_MISSING,
};

enum efc_scsi_del_target_reason {
	EFC_SCSI_TARGET_DELETED,
	EFC_SCSI_TARGET_MISSING,
};

#define EFC_FC_ELS_DEFAULT_RETRIES	3

#define domain_sm_trace(domain) \
	efc_log_debug(domain->efc, "[domain:%s] %-20s %-20s\n", \
		      domain->display_name, __func__, efc_sm_event_name(evt)) \

#define domain_trace(domain, fmt, ...) \
	efc_log_debug(domain->efc, \
		      "[%s]" fmt, domain->display_name, ##__VA_ARGS__) \

#define node_sm_trace() \
	efc_log_debug(node->efc, "[%s] %-20s %-20s\n", \
		      node->display_name, __func__, efc_sm_event_name(evt)) \

#define nport_sm_trace(nport) \
	efc_log_debug(nport->efc, \
		      "[%s] %-20s\n", nport->display_name, efc_sm_event_name(evt)) \

#endif /* __EFC_H__ */
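Note that domain_sm_trace() and node_sm_trace() take no event argument: they expand against a local variable named evt in the calling state handler. A minimal userspace sketch of that convention, with hypothetical names:

#include <stdio.h>

static const char *evt_name(int evt)
{
	return evt == 1 ? "EVT_ENTER" : "EVT_EXIT";
}

/*
 * Like node_sm_trace(), this macro names no event parameter and
 * instead relies on an "evt" variable in the caller's scope, so
 * every state handler can trace with a single token.
 */
#define sm_trace(name) \
	printf("[%s] %-20s %-20s\n", (name), __func__, evt_name(evt))

static void node_state_ready(int evt)
{
	sm_trace("node0");	/* picks up this function's "evt" */
}

int main(void)
{
	node_state_ready(1);
	return 0;
}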
@@ -0,0 +1,777 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#include "efclib.h"
#include "../libefc_sli/sli4.h"
#include "efc_cmds.h"
#include "efc_sm.h"

static void
efc_nport_free_resources(struct efc_nport *nport, int evt, void *data)
{
	struct efc *efc = nport->efc;

	/* Clear the nport attached flag */
	nport->attached = false;

	/* Free the service parameters buffer */
	if (nport->dma.virt) {
		dma_free_coherent(&efc->pci->dev, nport->dma.size,
				  nport->dma.virt, nport->dma.phys);
		memset(&nport->dma, 0, sizeof(struct efc_dma));
	}

	/* Free the SLI resources */
	sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);

	efc_nport_cb(efc, evt, nport);
}

static int
efc_nport_get_mbox_status(struct efc_nport *nport, u8 *mqe, int status)
{
	struct efc *efc = nport->efc;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status vpi=%#x st=%x hdr=%x\n",
			      nport->indicator, status, le16_to_cpu(hdr->status));
		return -EIO;
	}

	return 0;
}

static int
efc_nport_free_unreg_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;
	int evt = EFC_EVT_NPORT_FREE_OK;
	int rc;

	rc = efc_nport_get_mbox_status(nport, mqe, status);
	if (rc)
		evt = EFC_EVT_NPORT_FREE_FAIL;

	efc_nport_free_resources(nport, evt, mqe);
	return rc;
}

static void
efc_nport_free_unreg_vpi(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	int rc;
	u8 data[SLI4_BMBX_SIZE];

	rc = sli_cmd_unreg_vpi(efc->sli, data, nport->indicator,
			       SLI4_UNREG_TYPE_PORT);
	if (rc) {
		efc_log_err(efc, "UNREG_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_free_unreg_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "UNREG_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_FAIL, data);
	}
}

static void
efc_nport_send_evt(struct efc_nport *nport, int evt, void *data)
{
	struct efc *efc = nport->efc;

	/* Now inform the registered callbacks */
	efc_nport_cb(efc, evt, nport);

	/* Set the nport attached flag */
	if (evt == EFC_EVT_NPORT_ATTACH_OK)
		nport->attached = true;

	/* If there is a pending free request, then handle it now */
	if (nport->free_req_pending)
		efc_nport_free_unreg_vpi(nport);
}

static int
efc_nport_alloc_init_vpi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;

	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
		return -EIO;
	}

	efc_nport_send_evt(nport, EFC_EVT_NPORT_ALLOC_OK, mqe);
	return 0;
}

static void
efc_nport_alloc_init_vpi(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/* If there is a pending free request, then handle it now */
	if (nport->free_req_pending) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_FREE_OK, data);
		return;
	}

	rc = sli_cmd_init_vpi(efc->sli, data,
			      nport->indicator, nport->domain->indicator);
	if (rc) {
		efc_log_err(efc, "INIT_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_alloc_init_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "INIT_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
	}
}

static int
efc_nport_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_nport *nport = arg;
	u8 *payload = NULL;

	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, mqe);
		return -EIO;
	}

	payload = nport->dma.virt;

	memcpy(&nport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
	       sizeof(nport->sli_wwpn));
	memcpy(&nport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
	       sizeof(nport->sli_wwnn));

	dma_free_coherent(&efc->pci->dev, nport->dma.size, nport->dma.virt,
			  nport->dma.phys);
	memset(&nport->dma, 0, sizeof(struct efc_dma));
	efc_nport_alloc_init_vpi(nport);
	return 0;
}

static void
efc_nport_alloc_read_sparm64(struct efc *efc, struct efc_nport *nport)
{
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/* Allocate memory for the service parameters */
	nport->dma.size = EFC_SPARAM_DMA_SZ;
	nport->dma.virt = dma_alloc_coherent(&efc->pci->dev,
					     nport->dma.size, &nport->dma.phys,
					     GFP_DMA);
	if (!nport->dma.virt) {
		efc_log_err(efc, "Failed to allocate DMA memory\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = sli_cmd_read_sparm64(efc->sli, data,
				  &nport->dma, nport->indicator);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_nport_alloc_read_sparm64_cb, nport);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ALLOC_FAIL, data);
	}
}

int
efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
		    struct efc_domain *domain, u8 *wwpn)
{
	u32 index;

	nport->indicator = U32_MAX;
	nport->free_req_pending = false;

	if (wwpn)
		memcpy(&nport->sli_wwpn, wwpn, sizeof(nport->sli_wwpn));

	/*
	 * Allocate a VPI object for the port and store it in the
	 * indicator field of the port object.
	 */
	if (sli_resource_alloc(efc->sli, SLI4_RSRC_VPI,
			       &nport->indicator, &index)) {
		efc_log_err(efc, "VPI allocation failure\n");
		return -EIO;
	}

	if (domain) {
		/*
		 * If the WWPN is NULL, fetch the default
		 * WWPN and WWNN before initializing the VPI
		 */
		if (!wwpn)
			efc_nport_alloc_read_sparm64(efc, nport);
		else
			efc_nport_alloc_init_vpi(nport);
	} else if (!wwpn) {
		/* domain NULL and wwpn NULL */
		efc_log_err(efc, "need WWN for physical port\n");
		sli_resource_free(efc->sli, SLI4_RSRC_VPI, nport->indicator);
		return -EIO;
	}

	return 0;
}
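Each step above formats a mailbox command into a bounce buffer and issues it with a completion callback, and the callback either drives the next command (READ_SPARM64, then INIT_VPI) or tears the nport down on failure. A hedged userspace sketch of that callback chaining, where the synchronous issue_cmd() merely stands in for the asynchronous efc->tt.issue_mbox_rqst():

#include <stdio.h>

typedef int (*mbox_cb)(int status, void *arg);

/* Pretend to issue a mailbox command and complete it immediately. */
static int issue_cmd(const char *name, mbox_cb cb, void *arg)
{
	printf("issue %s\n", name);
	return cb(0, arg);
}

static int init_vpi_done(int status, void *arg)
{
	(void)arg;
	if (status) {
		printf("INIT_VPI failed, free resources\n");
		return -1;
	}
	printf("INIT_VPI ok -> post NPORT_ALLOC_OK\n");
	return 0;
}

static int read_sparm64_done(int status, void *arg)
{
	if (status)
		return -1;
	/* default WWPN/WWNN captured; continue with INIT_VPI */
	return issue_cmd("INIT_VPI", init_vpi_done, arg);
}

int main(void)
{
	return issue_cmd("READ_SPARM64", read_sparm64_done, NULL);
}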

static int
efc_nport_attach_reg_vpi_cb(struct efc *efc, int status, u8 *mqe,
			    void *arg)
{
	struct efc_nport *nport = arg;

	if (efc_nport_get_mbox_status(nport, mqe, status)) {
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, mqe);
		return -EIO;
	}

	efc_nport_send_evt(nport, EFC_EVT_NPORT_ATTACH_OK, mqe);
	return 0;
}

int
efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = 0;

	if (!nport) {
		efc_log_err(efc, "bad param(s) nport=%p\n", nport);
		return -EIO;
	}

	nport->fc_id = fc_id;

	/* register previously-allocated VPI with the device */
	rc = sli_cmd_reg_vpi(efc->sli, buf, nport->fc_id,
			     nport->sli_wwpn, nport->indicator,
			     nport->domain->indicator, false);
	if (rc) {
		efc_log_err(efc, "REG_VPI format failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
		return rc;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, buf,
				     efc_nport_attach_reg_vpi_cb, nport);
	if (rc) {
		efc_log_err(efc, "REG_VPI command failure\n");
		efc_nport_free_resources(nport, EFC_EVT_NPORT_ATTACH_FAIL, buf);
	}

	return rc;
}

int
efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport)
{
	if (!nport) {
		efc_log_err(efc, "bad parameter(s) nport=%p\n", nport);
		return -EIO;
	}

	/* Issue the UNREG_VPI command to free the assigned VPI context */
	if (nport->attached)
		efc_nport_free_unreg_vpi(nport);
	else
		nport->free_req_pending = true;

	return 0;
}

static int
efc_domain_get_mbox_status(struct efc_domain *domain, u8 *mqe, int status)
{
	struct efc *efc = domain->efc;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status vfi=%#x st=%x hdr=%x\n",
			      domain->indicator, status,
			      le16_to_cpu(hdr->status));
		return -EIO;
	}

	return 0;
}

static void
efc_domain_free_resources(struct efc_domain *domain, int evt, void *data)
{
	struct efc *efc = domain->efc;

	/* Free the service parameters buffer */
	if (domain->dma.virt) {
		dma_free_coherent(&efc->pci->dev,
				  domain->dma.size, domain->dma.virt,
				  domain->dma.phys);
		memset(&domain->dma, 0, sizeof(struct efc_dma));
	}

	/* Free the SLI resources */
	sli_resource_free(efc->sli, SLI4_RSRC_VFI, domain->indicator);

	efc_domain_cb(efc, evt, domain);
}

static void
efc_domain_send_nport_evt(struct efc_domain *domain,
			  int port_evt, int domain_evt, void *data)
{
	struct efc *efc = domain->efc;

	/* Send alloc/attach ok to the physical nport */
	efc_nport_send_evt(domain->nport, port_evt, NULL);

	/* Now inform the registered callbacks */
	efc_domain_cb(efc, domain_evt, domain);
}

static int
efc_domain_alloc_read_sparm64_cb(struct efc *efc, int status, u8 *mqe,
				 void *arg)
{
	struct efc_domain *domain = arg;

	if (efc_domain_get_mbox_status(domain, mqe, status)) {
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
		return -EIO;
	}

	efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ALLOC_OK,
				  EFC_HW_DOMAIN_ALLOC_OK, mqe);
	return 0;
}

static void
efc_domain_alloc_read_sparm64(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	rc = sli_cmd_read_sparm64(efc->sli, data, &domain->dma, 0);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 format failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
		return;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_alloc_read_sparm64_cb, domain);
	if (rc) {
		efc_log_err(efc, "READ_SPARM64 command failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
	}
}

static int
efc_domain_alloc_init_vfi_cb(struct efc *efc, int status, u8 *mqe,
			     void *arg)
{
	struct efc_domain *domain = arg;

	if (efc_domain_get_mbox_status(domain, mqe, status)) {
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, mqe);
		return -EIO;
	}

	efc_domain_alloc_read_sparm64(domain);
	return 0;
}

static void
efc_domain_alloc_init_vfi(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	struct efc_nport *nport = domain->nport;
	u8 data[SLI4_BMBX_SIZE];
	int rc;

	/*
	 * For FC, the HW already registered an FCFI.
	 * Copy FCF information into the domain and jump to INIT_VFI.
	 */
	domain->fcf_indicator = efc->fcfi;
	rc = sli_cmd_init_vfi(efc->sli, data, domain->indicator,
			      domain->fcf_indicator, nport->indicator);
	if (rc) {
		efc_log_err(efc, "INIT_VFI format failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
		return;
	}

	efc_log_err(efc, "%s issue mbox\n", __func__);
	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_alloc_init_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "INIT_VFI command failure\n");
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ALLOC_FAIL, data);
	}
}

int
efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf)
{
	u32 index;

	if (!domain || !domain->nport) {
		efc_log_err(efc, "bad parameter(s) domain=%p nport=%p\n",
			    domain, domain ? domain->nport : NULL);
		return -EIO;
	}

	/* allocate memory for the service parameters */
	domain->dma.size = EFC_SPARAM_DMA_SZ;
	domain->dma.virt = dma_alloc_coherent(&efc->pci->dev,
					      domain->dma.size,
					      &domain->dma.phys, GFP_DMA);
	if (!domain->dma.virt) {
		efc_log_err(efc, "Failed to allocate DMA memory\n");
		return -EIO;
	}

	domain->fcf = fcf;
	domain->fcf_indicator = U32_MAX;
	domain->indicator = U32_MAX;

	if (sli_resource_alloc(efc->sli, SLI4_RSRC_VFI, &domain->indicator,
			       &index)) {
		efc_log_err(efc, "VFI allocation failure\n");

		dma_free_coherent(&efc->pci->dev,
				  domain->dma.size, domain->dma.virt,
				  domain->dma.phys);
		memset(&domain->dma, 0, sizeof(struct efc_dma));

		return -EIO;
	}

	efc_domain_alloc_init_vfi(domain);
	return 0;
}

static int
efc_domain_attach_reg_vfi_cb(struct efc *efc, int status, u8 *mqe,
			     void *arg)
{
	struct efc_domain *domain = arg;

	if (efc_domain_get_mbox_status(domain, mqe, status)) {
		efc_domain_free_resources(domain,
					  EFC_HW_DOMAIN_ATTACH_FAIL, mqe);
		return -EIO;
	}

	efc_domain_send_nport_evt(domain, EFC_EVT_NPORT_ATTACH_OK,
				  EFC_HW_DOMAIN_ATTACH_OK, mqe);
	return 0;
}

int
efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = 0;

	if (!domain) {
		efc_log_err(efc, "bad param(s) domain=%p\n", domain);
		return -EIO;
	}

	domain->nport->fc_id = fc_id;

	rc = sli_cmd_reg_vfi(efc->sli, buf, SLI4_BMBX_SIZE, domain->indicator,
			     domain->fcf_indicator, domain->dma,
			     domain->nport->indicator, domain->nport->sli_wwpn,
			     domain->nport->fc_id);
	if (rc) {
		efc_log_err(efc, "REG_VFI format failure\n");
		goto cleanup;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, buf,
				     efc_domain_attach_reg_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "REG_VFI command failure\n");
		goto cleanup;
	}

	return rc;

cleanup:
	efc_domain_free_resources(domain, EFC_HW_DOMAIN_ATTACH_FAIL, buf);

	return rc;
}

static int
efc_domain_free_unreg_vfi_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_domain *domain = arg;
	int evt = EFC_HW_DOMAIN_FREE_OK;
	int rc;

	rc = efc_domain_get_mbox_status(domain, mqe, status);
	if (rc) {
		evt = EFC_HW_DOMAIN_FREE_FAIL;
		rc = -EIO;
	}

	efc_domain_free_resources(domain, evt, mqe);
	return rc;
}

static void
efc_domain_free_unreg_vfi(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	int rc;
	u8 data[SLI4_BMBX_SIZE];

	rc = sli_cmd_unreg_vfi(efc->sli, data, domain->indicator,
			       SLI4_UNREG_TYPE_DOMAIN);
	if (rc) {
		efc_log_err(efc, "UNREG_VFI format failure\n");
		goto cleanup;
	}

	rc = efc->tt.issue_mbox_rqst(efc->base, data,
				     efc_domain_free_unreg_vfi_cb, domain);
	if (rc) {
		efc_log_err(efc, "UNREG_VFI command failure\n");
		goto cleanup;
	}

	return;

cleanup:
	efc_domain_free_resources(domain, EFC_HW_DOMAIN_FREE_FAIL, data);
}

int
efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain)
{
	if (!domain) {
		efc_log_err(efc, "bad parameter(s) domain=%p\n", domain);
		return -EIO;
	}

	efc_domain_free_unreg_vfi(domain);
	return 0;
}

int
efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
		   struct efc_nport *nport)
{
	/* Check for invalid indicator */
	if (rnode->indicator != U32_MAX) {
		efc_log_err(efc,
			    "RPI allocation failure addr=%#x rpi=%#x\n",
			    fc_addr, rnode->indicator);
		return -EIO;
	}

	/* NULL SLI port indicates an unallocated remote node */
	rnode->nport = NULL;

	if (sli_resource_alloc(efc->sli, SLI4_RSRC_RPI,
			       &rnode->indicator, &rnode->index)) {
		efc_log_err(efc, "RPI allocation failure addr=%#x\n",
			    fc_addr);
		return -EIO;
	}

	rnode->fc_id = fc_addr;
	rnode->nport = nport;

	return 0;
}

static int
efc_cmd_node_attach_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_remote_node *rnode = arg;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;
	int evt = 0;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
			      le16_to_cpu(hdr->status));
		rnode->attached = false;
		evt = EFC_EVT_NODE_ATTACH_FAIL;
	} else {
		rnode->attached = true;
		evt = EFC_EVT_NODE_ATTACH_OK;
	}

	efc_remote_node_cb(efc, evt, rnode);

	return 0;
}

int
efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
		    struct efc_dma *sparms)
{
	int rc = -EIO;
	u8 buf[SLI4_BMBX_SIZE];

	if (!rnode || !sparms) {
		efc_log_err(efc, "bad parameter(s) rnode=%p sparms=%p\n",
			    rnode, sparms);
		return -EIO;
	}

	/*
	 * If the attach count is non-zero, this RPI has already been reg'd.
	 * Otherwise, register the RPI
	 */
	if (rnode->index == U32_MAX) {
		efc_log_err(efc, "bad parameter rnode->index invalid\n");
		return -EIO;
	}

	/* Update a remote node object with the remote port's service params */
	if (!sli_cmd_reg_rpi(efc->sli, buf, rnode->indicator,
			     rnode->nport->indicator, rnode->fc_id, sparms, 0, 0))
		rc = efc->tt.issue_mbox_rqst(efc->base, buf,
					     efc_cmd_node_attach_cb, rnode);

	return rc;
}

int
efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode)
{
	int rc = 0;

	if (!rnode) {
		efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
		return -EIO;
	}

	if (rnode->nport) {
		if (rnode->attached) {
			efc_log_err(efc, "rnode is still attached\n");
			return -EIO;
		}
		if (rnode->indicator != U32_MAX) {
			if (sli_resource_free(efc->sli, SLI4_RSRC_RPI,
					      rnode->indicator)) {
				efc_log_err(efc,
					    "RPI free fail RPI %d addr=%#x\n",
					    rnode->indicator, rnode->fc_id);
				rc = -EIO;
			} else {
				rnode->indicator = U32_MAX;
				rnode->index = U32_MAX;
			}
		}
	}

	return rc;
}

static int
efc_cmd_node_free_cb(struct efc *efc, int status, u8 *mqe, void *arg)
{
	struct efc_remote_node *rnode = arg;
	struct sli4_mbox_command_header *hdr =
			(struct sli4_mbox_command_header *)mqe;
	int evt = EFC_EVT_NODE_FREE_FAIL;
	int rc = 0;

	if (status || le16_to_cpu(hdr->status)) {
		efc_log_debug(efc, "bad status cqe=%#x mqe=%#x\n", status,
			      le16_to_cpu(hdr->status));

		/*
		 * In certain cases, a non-zero MQE status is OK (all must be
		 * true):
		 * - node is attached
		 * - status is 0x1400
		 */
		if (!rnode->attached ||
		    (le16_to_cpu(hdr->status) != SLI4_MBX_STATUS_RPI_NOT_REG))
			rc = -EIO;
	}

	if (!rc) {
		rnode->attached = false;
		evt = EFC_EVT_NODE_FREE_OK;
	}

	efc_remote_node_cb(efc, evt, rnode);

	return rc;
}

int
efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode)
{
	u8 buf[SLI4_BMBX_SIZE];
	int rc = -EIO;

	if (!rnode) {
		efc_log_err(efc, "bad parameter rnode=%p\n", rnode);
		return -EIO;
	}

	if (rnode->nport) {
		if (!rnode->attached)
			return -EIO;

		rc = -EIO;

		if (!sli_cmd_unreg_rpi(efc->sli, buf, rnode->indicator,
				       SLI4_RSRC_RPI, U32_MAX))
			rc = efc->tt.issue_mbox_rqst(efc->base, buf,
						     efc_cmd_node_free_cb, rnode);

		if (rc != 0) {
			efc_log_err(efc, "UNREG_RPI failed\n");
			rc = -EIO;
		}
	}

	return rc;
}
@@ -0,0 +1,35 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#ifndef __EFC_CMDS_H__
#define __EFC_CMDS_H__

#define EFC_SPARAM_DMA_SZ	112
int
efc_cmd_nport_alloc(struct efc *efc, struct efc_nport *nport,
		    struct efc_domain *domain, u8 *wwpn);
int
efc_cmd_nport_attach(struct efc *efc, struct efc_nport *nport, u32 fc_id);
int
efc_cmd_nport_free(struct efc *efc, struct efc_nport *nport);
int
efc_cmd_domain_alloc(struct efc *efc, struct efc_domain *domain, u32 fcf);
int
efc_cmd_domain_attach(struct efc *efc, struct efc_domain *domain, u32 fc_id);
int
efc_cmd_domain_free(struct efc *efc, struct efc_domain *domain);
int
efc_cmd_node_detach(struct efc *efc, struct efc_remote_node *rnode);
int
efc_node_free_resources(struct efc *efc, struct efc_remote_node *rnode);
int
efc_cmd_node_attach(struct efc *efc, struct efc_remote_node *rnode,
		    struct efc_dma *sparms);
int
efc_cmd_node_alloc(struct efc *efc, struct efc_remote_node *rnode, u32 fc_addr,
		   struct efc_nport *nport);

#endif /* __EFC_CMDS_H__ */
File diff suppressed because it is too large
@@ -0,0 +1,72 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * Node state machine functions for remote device node sm
 */

#ifndef __EFCT_DEVICE_H__
#define __EFCT_DEVICE_H__
void
efc_node_init_device(struct efc_node *node, bool send_plogi);
void
efc_process_prli_payload(struct efc_node *node,
			 void *prli);
void
efc_d_send_prli_rsp(struct efc_node *node, uint16_t ox_id);
void
efc_send_ls_acc_after_attach(struct efc_node *node,
			     struct fc_frame_header *hdr,
			     enum efc_node_send_ls_acc ls);
void
__efc_d_wait_loop(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg);
void
__efc_d_wait_plogi_acc_cmpl(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg);
void
__efc_d_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
void
__efc_d_wait_plogi_rsp(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg);
void
__efc_d_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
				  enum efc_sm_event evt, void *arg);
void
__efc_d_wait_domain_attach(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg);
void
__efc_d_wait_topology_notify(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg);
void
__efc_d_wait_node_attach(struct efc_sm_ctx *ctx,
			 enum efc_sm_event evt, void *arg);
void
__efc_d_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
				 enum efc_sm_event evt, void *arg);
void
__efc_d_initiate_shutdown(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg);
void
__efc_d_port_logged_in(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg);
void
__efc_d_wait_logo_acc_cmpl(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg);
void
__efc_d_device_ready(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg);
void
__efc_d_device_gone(struct efc_sm_ctx *ctx,
		    enum efc_sm_event evt, void *arg);
void
__efc_d_wait_adisc_rsp(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg);
void
__efc_d_wait_logo_rsp(struct efc_sm_ctx *ctx,
		      enum efc_sm_event evt, void *arg);

#endif /* __EFCT_DEVICE_H__ */
File diff suppressed because it is too large
@@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * Declare driver's domain handler exported interface
 */

#ifndef __EFCT_DOMAIN_H__
#define __EFCT_DOMAIN_H__

struct efc_domain *
efc_domain_alloc(struct efc *efc, uint64_t fcf_wwn);
void
efc_domain_free(struct efc_domain *domain);

void
__efc_domain_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
void
__efc_domain_wait_alloc(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
			void *arg);
void
__efc_domain_allocated(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
		       void *arg);
void
__efc_domain_wait_attach(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
			 void *arg);
void
__efc_domain_ready(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
void
__efc_domain_wait_nports_free(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
			      void *arg);
void
__efc_domain_wait_shutdown(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
			   void *arg);
void
__efc_domain_wait_domain_lost(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
			      void *arg);
void
efc_domain_attach(struct efc_domain *domain, u32 s_id);
int
efc_domain_post_event(struct efc_domain *domain, enum efc_sm_event event,
		      void *arg);
void
__efc_domain_attach_internal(struct efc_domain *domain, u32 s_id);

int
efc_domain_dispatch_frame(void *arg, struct efc_hw_sequence *seq);
void
efc_node_dispatch_frame(void *arg, struct efc_hw_sequence *seq);

#endif /* __EFCT_DOMAIN_H__ */
File diff suppressed because it is too large
@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#ifndef __EFC_ELS_H__
#define __EFC_ELS_H__

#define EFC_STATUS_INVALID	INT_MAX
#define EFC_ELS_IO_POOL_SZ	1024

struct efc_els_io_req {
	struct list_head list_entry;
	struct kref ref;
	void (*release)(struct kref *arg);
	struct efc_node *node;
	void *cb;
	u32 els_retries_remaining;
	bool els_req_free;
	struct timer_list delay_timer;

	const char *display_name;

	struct efc_disc_io io;
};

typedef int(*efc_hw_srrs_cb_t)(void *arg, u32 length, int status,
			       u32 ext_status);

void _efc_els_io_free(struct kref *arg);
struct efc_els_io_req *
efc_els_io_alloc(struct efc_node *node, u32 reqlen);
struct efc_els_io_req *
efc_els_io_alloc_size(struct efc_node *node, u32 reqlen, u32 rsplen);
void efc_els_io_free(struct efc_els_io_req *els);

/* ELS command send */
typedef void (*els_cb_t)(struct efc_node *node,
			 struct efc_node_cb *cbdata, void *arg);
int
efc_send_plogi(struct efc_node *node);
int
efc_send_flogi(struct efc_node *node);
int
efc_send_fdisc(struct efc_node *node);
int
efc_send_prli(struct efc_node *node);
int
efc_send_prlo(struct efc_node *node);
int
efc_send_logo(struct efc_node *node);
int
efc_send_adisc(struct efc_node *node);
int
efc_send_pdisc(struct efc_node *node);
int
efc_send_scr(struct efc_node *node);
int
efc_ns_send_rftid(struct efc_node *node);
int
efc_ns_send_rffid(struct efc_node *node);
int
efc_ns_send_gidpt(struct efc_node *node);
void
efc_els_io_cleanup(struct efc_els_io_req *els, int evt, void *arg);

/* ELS acc send */
int
efc_send_ls_acc(struct efc_node *node, u32 ox_id);
int
efc_send_ls_rjt(struct efc_node *node, u32 ox_id, u32 reason_code,
		u32 reason_code_expl, u32 vendor_unique);
int
efc_send_flogi_p2p_acc(struct efc_node *node, u32 ox_id, u32 s_id);
int
efc_send_flogi_acc(struct efc_node *node, u32 ox_id, u32 is_fport);
int
efc_send_plogi_acc(struct efc_node *node, u32 ox_id);
int
efc_send_prli_acc(struct efc_node *node, u32 ox_id);
int
efc_send_logo_acc(struct efc_node *node, u32 ox_id);
int
efc_send_prlo_acc(struct efc_node *node, u32 ox_id);
int
efc_send_adisc_acc(struct efc_node *node, u32 ox_id);

int
efc_bls_send_acc_hdr(struct efc *efc, struct efc_node *node,
		     struct fc_frame_header *hdr);
int
efc_bls_send_rjt_hdr(struct efc_els_io_req *io, struct fc_frame_header *hdr);

int
efc_els_io_list_empty(struct efc_node *node, struct list_head *list);

/* CT */
int
efc_send_ct_rsp(struct efc *efc, struct efc_node *node, u16 ox_id,
		struct fc_ct_hdr *ct_hdr, u32 cmd_rsp_code, u32 reason_code,
		u32 reason_code_explanation);

int
efc_send_bls_acc(struct efc_node *node, struct fc_frame_header *hdr);

#endif /* __EFC_ELS_H__ */
File diff suppressed because it is too large
@@ -0,0 +1,116 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * Declarations for the interface exported by efc_fabric
 */

#ifndef __EFCT_FABRIC_H__
#define __EFCT_FABRIC_H__
#include "scsi/fc/fc_els.h"
#include "scsi/fc/fc_fs.h"
#include "scsi/fc/fc_ns.h"

void
__efc_fabric_init(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg);
void
__efc_fabric_flogi_wait_rsp(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg);
void
__efc_fabric_domain_attach_wait(struct efc_sm_ctx *ctx,
				enum efc_sm_event evt, void *arg);
void
__efc_fabric_wait_domain_attach(struct efc_sm_ctx *ctx,
				enum efc_sm_event evt, void *arg);

void
__efc_vport_fabric_init(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg);
void
__efc_fabric_fdisc_wait_rsp(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg);
void
__efc_fabric_wait_nport_attach(struct efc_sm_ctx *ctx,
			       enum efc_sm_event evt, void *arg);

void
__efc_ns_init(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
void
__efc_ns_plogi_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg);
void
__efc_ns_rftid_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg);
void
__efc_ns_rffid_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg);
void
__efc_ns_wait_node_attach(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg);
void
__efc_fabric_wait_attach_evt_shutdown(struct efc_sm_ctx *ctx,
				      enum efc_sm_event evt, void *arg);
void
__efc_ns_logo_wait_rsp(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg);
void
__efc_ns_gidpt_wait_rsp(struct efc_sm_ctx *ctx,
			enum efc_sm_event evt, void *arg);
void
__efc_ns_idle(struct efc_sm_ctx *ctx, enum efc_sm_event evt, void *arg);
void
__efc_ns_gidpt_delay(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg);
void
__efc_fabctl_init(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg);
void
__efc_fabctl_wait_node_attach(struct efc_sm_ctx *ctx,
			      enum efc_sm_event evt, void *arg);
void
__efc_fabctl_wait_scr_rsp(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg);
void
__efc_fabctl_ready(struct efc_sm_ctx *ctx,
		   enum efc_sm_event evt, void *arg);
void
__efc_fabctl_wait_ls_acc_cmpl(struct efc_sm_ctx *ctx,
			      enum efc_sm_event evt, void *arg);
void
__efc_fabric_idle(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg);

void
__efc_p2p_rnode_init(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg);
void
__efc_p2p_domain_attach_wait(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg);
void
__efc_p2p_wait_flogi_acc_cmpl(struct efc_sm_ctx *ctx,
			      enum efc_sm_event evt, void *arg);
void
__efc_p2p_wait_plogi_rsp(struct efc_sm_ctx *ctx,
			 enum efc_sm_event evt, void *arg);
void
__efc_p2p_wait_plogi_rsp_recvd_prli(struct efc_sm_ctx *ctx,
				    enum efc_sm_event evt, void *arg);
void
__efc_p2p_wait_domain_attach(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg);
void
__efc_p2p_wait_node_attach(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg);

int
efc_p2p_setup(struct efc_nport *nport);
void
efc_fabric_set_topology(struct efc_node *node,
			enum efc_nport_topology topology);
void efc_fabric_notify_topology(struct efc_node *node);

#endif /* __EFCT_FABRIC_H__ */
File diff suppressed because it is too large
@@ -0,0 +1,191 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#if !defined(__EFC_NODE_H__)
#define __EFC_NODE_H__
#include "scsi/fc/fc_ns.h"

#define EFC_NODEDB_PAUSE_FABRIC_LOGIN	(1 << 0)
#define EFC_NODEDB_PAUSE_NAMESERVER	(1 << 1)
#define EFC_NODEDB_PAUSE_NEW_NODES	(1 << 2)

#define MAX_ACC_REJECT_PAYLOAD	sizeof(struct fc_els_ls_rjt)

#define scsi_io_printf(io, fmt, ...) \
	efc_log_debug(io->efc, "[%s] [%04x][i:%04x t:%04x h:%04x]" fmt, \
		      io->node->display_name, io->instance_index, io->init_task_tag, \
		      io->tgt_task_tag, io->hw_tag, ##__VA_ARGS__)

static inline void
efc_node_evt_set(struct efc_sm_ctx *ctx, enum efc_sm_event evt,
		 const char *handler)
{
	struct efc_node *node = ctx->app;

	if (evt == EFC_EVT_ENTER) {
		strncpy(node->current_state_name, handler,
			sizeof(node->current_state_name));
	} else if (evt == EFC_EVT_EXIT) {
		strncpy(node->prev_state_name, node->current_state_name,
			sizeof(node->prev_state_name));
		strncpy(node->current_state_name, "invalid",
			sizeof(node->current_state_name));
	}
	node->prev_evt = node->current_evt;
	node->current_evt = evt;
}

/**
 * hold frames in pending frame list
 *
 * Unsolicited receive frames are held on the node pending frame list,
 * rather than being processed.
 */

static inline void
efc_node_hold_frames(struct efc_node *node)
{
	node->hold_frames = true;
}

/**
 * accept frames
 *
 * Unsolicited receive frames are processed rather than being held on the
 * node pending frame list.
 */

static inline void
efc_node_accept_frames(struct efc_node *node)
{
	node->hold_frames = false;
}

/*
 * Node initiator/target enable defines
 * All combinations of the SLI port (nport) initiator/target enable,
 * and remote node initiator/target enable are enumerated.
 * ex: EFC_NODE_ENABLE_T_TO_IT decodes to target mode is enabled on SLI port
 * and I+T is enabled on remote node.
 */
enum efc_node_enable {
	EFC_NODE_ENABLE_x_TO_x,
	EFC_NODE_ENABLE_x_TO_T,
	EFC_NODE_ENABLE_x_TO_I,
	EFC_NODE_ENABLE_x_TO_IT,
	EFC_NODE_ENABLE_T_TO_x,
	EFC_NODE_ENABLE_T_TO_T,
	EFC_NODE_ENABLE_T_TO_I,
	EFC_NODE_ENABLE_T_TO_IT,
	EFC_NODE_ENABLE_I_TO_x,
	EFC_NODE_ENABLE_I_TO_T,
	EFC_NODE_ENABLE_I_TO_I,
	EFC_NODE_ENABLE_I_TO_IT,
	EFC_NODE_ENABLE_IT_TO_x,
	EFC_NODE_ENABLE_IT_TO_T,
	EFC_NODE_ENABLE_IT_TO_I,
	EFC_NODE_ENABLE_IT_TO_IT,
};

static inline enum efc_node_enable
efc_node_get_enable(struct efc_node *node)
{
	u32 retval = 0;

	if (node->nport->enable_ini)
		retval |= (1U << 3);
	if (node->nport->enable_tgt)
		retval |= (1U << 2);
	if (node->init)
		retval |= (1U << 1);
	if (node->targ)
		retval |= (1U << 0);
	return (enum efc_node_enable)retval;
}
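A standalone check of the bit encoding used by efc_node_get_enable(), where bits 3 and 2 carry the nport's initiator/target enables and bits 1 and 0 the remote node's:

#include <stdio.h>

static unsigned int node_enable(int nport_ini, int nport_tgt,
				int node_ini, int node_tgt)
{
	unsigned int v = 0;

	if (nport_ini)
		v |= 1U << 3;
	if (nport_tgt)
		v |= 1U << 2;
	if (node_ini)
		v |= 1U << 1;
	if (node_tgt)
		v |= 1U << 0;
	return v;
}

int main(void)
{
	/*
	 * A target-only nport talking to an I+T remote node encodes
	 * as 0b0111 == 7, i.e. EFC_NODE_ENABLE_T_TO_IT in the enum
	 * ordering above.
	 */
	printf("T_TO_IT = %u\n", node_enable(0, 1, 1, 1));
	return 0;
}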

int
efc_node_check_els_req(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg,
		       u8 cmd, void (*efc_node_common_func)(const char *,
		       struct efc_sm_ctx *, enum efc_sm_event, void *),
		       const char *funcname);
int
efc_node_check_ns_req(struct efc_sm_ctx *ctx,
		      enum efc_sm_event evt, void *arg,
		      u16 cmd, void (*efc_node_common_func)(const char *,
		      struct efc_sm_ctx *, enum efc_sm_event, void *),
		      const char *funcname);
int
efc_node_attach(struct efc_node *node);
struct efc_node *
efc_node_alloc(struct efc_nport *nport, u32 port_id,
	       bool init, bool targ);
void
efc_node_free(struct efc_node *efc);
void
efc_node_update_display_name(struct efc_node *node);
void efc_node_post_event(struct efc_node *node, enum efc_sm_event evt,
			 void *arg);

void
__efc_node_shutdown(struct efc_sm_ctx *ctx,
		    enum efc_sm_event evt, void *arg);
void
__efc_node_wait_node_free(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg);
void
__efc_node_wait_els_shutdown(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg);
void
__efc_node_wait_ios_shutdown(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg);
void
efc_node_save_sparms(struct efc_node *node, void *payload);
void
efc_node_transition(struct efc_node *node,
		    void (*state)(struct efc_sm_ctx *, enum efc_sm_event,
				  void *), void *data);
void
__efc_node_common(const char *funcname, struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg);

void
efc_node_initiate_cleanup(struct efc_node *node);

void
efc_node_build_eui_name(char *buf, u32 buf_len, uint64_t eui_name);

void
efc_node_pause(struct efc_node *node,
	       void (*state)(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg));
void
__efc_node_paused(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *arg);
int
efc_node_active_ios_empty(struct efc_node *node);
void
efc_node_send_ls_io_cleanup(struct efc_node *node);

int
efc_els_io_list_empty(struct efc_node *node, struct list_head *list);

void
efc_process_node_pending(struct efc_node *domain);

u64 efc_node_get_wwnn(struct efc_node *node);
struct efc_node *
efc_node_find(struct efc_nport *nport, u32 id);
void
efc_node_post_els_resp(struct efc_node *node, u32 evt, void *arg);
void
efc_node_recv_els_frame(struct efc_node *node, struct efc_hw_sequence *s);
void
efc_node_recv_ct_frame(struct efc_node *node, struct efc_hw_sequence *seq);
void
efc_node_recv_fcp_cmd(struct efc_node *node, struct efc_hw_sequence *seq);

#endif /* __EFC_NODE_H__ */
@@ -0,0 +1,777 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * NPORT
 *
 * Port object for physical port and NPIV ports.
 */

/*
|
||||
* NPORT REFERENCE COUNTING
|
||||
*
|
||||
* A nport reference should be taken when:
|
||||
* - an nport is allocated
|
||||
* - a vport populates associated nport
|
||||
* - a remote node is allocated
|
||||
* - a unsolicited frame is processed
|
||||
* The reference should be dropped when:
|
||||
* - the unsolicited frame processesing is done
|
||||
* - the remote node is removed
|
||||
* - the vport is removed
|
||||
* - the nport is removed
|
||||
*/
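
/*
 * Illustrative sketch (not part of the driver): the rules above map onto
 * the standard kref pattern used throughout this file. A hypothetical
 * caller that looks up an nport and uses it safely would do:
 *
 *	struct efc_nport *nport = efc_nport_find(domain, d_id);
 *
 *	if (nport) {
 *		... use nport ...
 *		kref_put(&nport->ref, nport->release);
 *	}
 *
 * efc_nport_find() only returns the object after kref_get_unless_zero()
 * succeeds, so the nport cannot be freed out from under the caller.
 */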

#include "efc.h"

void
efc_nport_cb(void *arg, int event, void *data)
{
	struct efc *efc = arg;
	struct efc_nport *nport = data;
	unsigned long flags = 0;

	efc_log_debug(efc, "nport event: %s\n", efc_sm_event_name(event));

	spin_lock_irqsave(&efc->lock, flags);
	efc_sm_post_event(&nport->sm, event, NULL);
	spin_unlock_irqrestore(&efc->lock, flags);
}

static struct efc_nport *
efc_nport_find_wwn(struct efc_domain *domain, uint64_t wwnn, uint64_t wwpn)
{
	struct efc_nport *nport = NULL;

	/* Find a nport, given the WWNN and WWPN */
	list_for_each_entry(nport, &domain->nport_list, list_entry) {
		if (nport->wwnn == wwnn && nport->wwpn == wwpn)
			return nport;
	}
	return NULL;
}

static void
_efc_nport_free(struct kref *arg)
{
	struct efc_nport *nport = container_of(arg, struct efc_nport, ref);

	kfree(nport);
}

struct efc_nport *
efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
		u32 fc_id, bool enable_ini, bool enable_tgt)
{
	struct efc_nport *nport;

	if (domain->efc->enable_ini)
		enable_ini = 0;

	/* Return a failure if this nport has already been allocated */
	if ((wwpn != 0) || (wwnn != 0)) {
		nport = efc_nport_find_wwn(domain, wwnn, wwpn);
		if (nport) {
			efc_log_err(domain->efc,
				    "NPORT %016llX %016llX already allocated\n",
				    wwnn, wwpn);
			return NULL;
		}
	}

	nport = kzalloc(sizeof(*nport), GFP_ATOMIC);
	if (!nport)
		return nport;

	/* initialize refcount */
	kref_init(&nport->ref);
	nport->release = _efc_nport_free;

	nport->efc = domain->efc;
	snprintf(nport->display_name, sizeof(nport->display_name), "------");
	nport->domain = domain;
	xa_init(&nport->lookup);
	nport->instance_index = domain->nport_count++;
	nport->sm.app = nport;
	nport->enable_ini = enable_ini;
	nport->enable_tgt = enable_tgt;
	nport->enable_rscn = (nport->enable_ini ||
			      (nport->enable_tgt && enable_target_rscn(nport->efc)));

	/* Copy service parameters from domain */
	memcpy(nport->service_params, domain->service_params,
	       sizeof(struct fc_els_flogi));

	/* Update requested fc_id */
	nport->fc_id = fc_id;

	/* Update the nport's service parameters for the new wwn's */
	nport->wwpn = wwpn;
	nport->wwnn = wwnn;
	snprintf(nport->wwnn_str, sizeof(nport->wwnn_str), "%016llX",
		 (unsigned long long)wwnn);

	/*
	 * if this is the "first" nport of the domain,
	 * then make it the "phys" nport
	 */
	if (list_empty(&domain->nport_list))
		domain->nport = nport;

	INIT_LIST_HEAD(&nport->list_entry);
	list_add_tail(&nport->list_entry, &domain->nport_list);

	kref_get(&domain->ref);

	efc_log_debug(domain->efc, "New Nport [%s]\n", nport->display_name);

	return nport;
}

void
efc_nport_free(struct efc_nport *nport)
{
	struct efc_domain *domain;

	if (!nport)
		return;

	domain = nport->domain;
	efc_log_debug(domain->efc, "[%s] free nport\n", nport->display_name);
	list_del(&nport->list_entry);
	/*
	 * if this is the physical nport,
	 * then clear it out of the domain
	 */
	if (nport == domain->nport)
		domain->nport = NULL;

	xa_destroy(&nport->lookup);
	xa_erase(&domain->lookup, nport->fc_id);

	if (list_empty(&domain->nport_list))
		efc_domain_post_event(domain, EFC_EVT_ALL_CHILD_NODES_FREE,
				      NULL);

	kref_put(&domain->ref, domain->release);
	kref_put(&nport->ref, nport->release);
}

struct efc_nport *
efc_nport_find(struct efc_domain *domain, u32 d_id)
{
	struct efc_nport *nport;

	/* Find a nport object, given an FC_ID */
	nport = xa_load(&domain->lookup, d_id);
	if (!nport || !kref_get_unless_zero(&nport->ref))
		return NULL;

	return nport;
}

int
efc_nport_attach(struct efc_nport *nport, u32 fc_id)
{
	int rc;
	struct efc_node *node;
	struct efc *efc = nport->efc;
	unsigned long index;

	/* Set our lookup */
	rc = xa_err(xa_store(&nport->domain->lookup, fc_id, nport, GFP_ATOMIC));
	if (rc) {
		efc_log_err(efc, "Sport lookup store failed: %d\n", rc);
		return rc;
	}

	/* Update our display_name */
	efc_node_fcid_display(fc_id, nport->display_name,
			      sizeof(nport->display_name));

	xa_for_each(&nport->lookup, index, node) {
		efc_node_update_display_name(node);
	}

	efc_log_debug(nport->efc, "[%s] attach nport: fc_id x%06x\n",
		      nport->display_name, fc_id);

	/* Register a nport, given an FC_ID */
	rc = efc_cmd_nport_attach(efc, nport, fc_id);
	if (rc < 0) {
		efc_log_err(nport->efc,
			    "efc_hw_port_attach failed: %d\n", rc);
		return -EIO;
	}
	return 0;
}
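
/*
 * Illustrative sketch (not part of the driver): domain->lookup is an
 * xarray keyed by FC_ID, so attach/find/free reduce to the standard
 * xarray triad; with a hypothetical fc_id and d_id:
 *
 *	xa_store(&domain->lookup, fc_id, nport, GFP_ATOMIC);
 *	nport = xa_load(&domain->lookup, d_id);
 *	xa_erase(&domain->lookup, fc_id);
 *
 * xa_load() is safe for lockless readers, which is why efc_nport_find()
 * above still takes kref_get_unless_zero() before handing out the pointer.
 */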

static void
efc_nport_shutdown(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	struct efc_node *node;
	unsigned long index;

	xa_for_each(&nport->lookup, index, node) {
		if (!(node->rnode.fc_id == FC_FID_FLOGI && nport->is_vport)) {
			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
			continue;
		}

		/*
		 * If this is a vport, logout of the fabric
		 * controller so that it deletes the vport
		 * on the switch.
		 */
		/* if link is down, don't send logo */
		if (efc->link_status == EFC_LINK_STATUS_DOWN) {
			efc_node_post_event(node, EFC_EVT_SHUTDOWN, NULL);
			continue;
		}

		efc_log_debug(efc, "[%s] nport shutdown vport, send logo\n",
			      node->display_name);

		if (!efc_send_logo(node)) {
			/* sent LOGO, wait for response */
			efc_node_transition(node, __efc_d_wait_logo_rsp, NULL);
			continue;
		}

		/*
		 * failed to send LOGO,
		 * go ahead and cleanup node anyways
		 */
		node_printf(node, "Failed to send LOGO\n");
		efc_node_post_event(node, EFC_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
	}
}

static void
efc_vport_link_down(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	struct efc_vport *vport;

	/* Clear the nport reference in the vport specification */
	list_for_each_entry(vport, &efc->vport_list, list_entry) {
		if (vport->nport == nport) {
			kref_put(&nport->ref, nport->release);
			vport->nport = NULL;
			break;
		}
	}
}

static void
__efc_nport_common(const char *funcname, struct efc_sm_ctx *ctx,
		   enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc_domain *domain = nport->domain;
	struct efc *efc = nport->efc;

	switch (evt) {
	case EFC_EVT_ENTER:
	case EFC_EVT_REENTER:
	case EFC_EVT_EXIT:
	case EFC_EVT_ALL_CHILD_NODES_FREE:
		break;
	case EFC_EVT_NPORT_ATTACH_OK:
		efc_sm_transition(ctx, __efc_nport_attached, NULL);
		break;
	case EFC_EVT_SHUTDOWN:
		/* Flag this nport as shutting down */
		nport->shutting_down = true;

		if (nport->is_vport)
			efc_vport_link_down(nport);

		if (xa_empty(&nport->lookup)) {
			/* Remove the nport from the domain's lookup table */
			xa_erase(&domain->lookup, nport->fc_id);
			efc_sm_transition(ctx, __efc_nport_wait_port_free,
					  NULL);
			if (efc_cmd_nport_free(efc, nport)) {
				efc_log_debug(nport->efc,
					      "efc_hw_port_free failed\n");
				/* Not much we can do, free the nport anyways */
				efc_nport_free(nport);
			}
		} else {
			/* sm: node list is not empty / shutdown nodes */
			efc_sm_transition(ctx,
					  __efc_nport_wait_shutdown, NULL);
			efc_nport_shutdown(nport);
		}
		break;
	default:
		efc_log_debug(nport->efc, "[%s] %-20s %-20s not handled\n",
			      nport->display_name, funcname,
			      efc_sm_event_name(evt));
	}
}
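
/*
 * Illustrative sketch (not part of the driver): every concrete state
 * below handles only the events it cares about and funnels the rest into
 * __efc_nport_common(), so default handling (shutdown, attach-ok, logging
 * of unexpected events) lives in exactly one place. The shape of each
 * state, with a hypothetical event name as a placeholder:
 *
 *	void
 *	__efc_nport_some_state(struct efc_sm_ctx *ctx,
 *			       enum efc_sm_event evt, void *arg)
 *	{
 *		switch (evt) {
 *		case EFC_EVT_SOMETHING:
 *			// state-specific work
 *			break;
 *		default:
 *			__efc_nport_common(__func__, ctx, evt, arg);
 *		}
 *	}
 */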

void
__efc_nport_allocated(struct efc_sm_ctx *ctx,
		      enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc_domain *domain = nport->domain;

	nport_sm_trace(nport);

	switch (evt) {
	/* the physical nport is attached */
	case EFC_EVT_NPORT_ATTACH_OK:
		WARN_ON(nport != domain->nport);
		efc_sm_transition(ctx, __efc_nport_attached, NULL);
		break;

	case EFC_EVT_NPORT_ALLOC_OK:
		/* ignore */
		break;
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}

void
__efc_nport_vport_init(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_ENTER: {
		__be64 be_wwpn = cpu_to_be64(nport->wwpn);

		if (nport->wwpn == 0)
			efc_log_debug(efc, "vport: letting f/w select WWN\n");

		if (nport->fc_id != U32_MAX) {
			efc_log_debug(efc, "vport: hard coding port id: %x\n",
				      nport->fc_id);
		}

		efc_sm_transition(ctx, __efc_nport_vport_wait_alloc, NULL);
		/* If wwpn is zero, then we'll let the f/w assign wwpn */
		if (efc_cmd_nport_alloc(efc, nport, nport->domain,
					nport->wwpn == 0 ? NULL :
					(uint8_t *)&be_wwpn)) {
			efc_log_err(efc, "Can't allocate port\n");
			break;
		}

		break;
	}
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}

void
__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_NPORT_ALLOC_OK: {
		struct fc_els_flogi *sp;

		sp = (struct fc_els_flogi *)nport->service_params;

		if (nport->wwnn == 0) {
			nport->wwnn = be64_to_cpu(nport->sli_wwnn);
			nport->wwpn = be64_to_cpu(nport->sli_wwpn);
			snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
				 "%016llX", nport->wwpn);
		}

		/* Update the nport's service parameters */
		sp->fl_wwpn = cpu_to_be64(nport->wwpn);
		sp->fl_wwnn = cpu_to_be64(nport->wwnn);

		/*
		 * if nport->fc_id is uninitialized,
		 * then request that the fabric node use FDISC
		 * to find an fc_id.
		 * Otherwise we're restoring vports, or we're in
		 * fabric emulation mode, so attach the fc_id
		 */
		if (nport->fc_id == U32_MAX) {
			struct efc_node *fabric;

			fabric = efc_node_alloc(nport, FC_FID_FLOGI, false,
						false);
			if (!fabric) {
				efc_log_err(efc, "efc_node_alloc() failed\n");
				return;
			}
			efc_node_transition(fabric, __efc_vport_fabric_init,
					    NULL);
		} else {
			snprintf(nport->wwnn_str, sizeof(nport->wwnn_str),
				 "%016llX", nport->wwpn);
			efc_nport_attach(nport, nport->fc_id);
		}
		efc_sm_transition(ctx, __efc_nport_vport_allocated, NULL);
		break;
	}
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}

void
__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	/*
	 * This state is entered after the nport is allocated;
	 * it then waits for a fabric node
	 * FDISC to complete, which requests a nport attach.
	 * The nport attach complete is handled in this state.
	 */
	switch (evt) {
	case EFC_EVT_NPORT_ATTACH_OK: {
		struct efc_node *node;

		/* Find our fabric node, and forward this event */
		node = efc_node_find(nport, FC_FID_FLOGI);
		if (!node) {
			efc_log_debug(efc, "can't find node %06x\n", FC_FID_FLOGI);
			break;
		}
		/* sm: / forward nport attach to fabric node */
		efc_node_post_event(node, evt, NULL);
		efc_sm_transition(ctx, __efc_nport_attached, NULL);
		break;
	}
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}

static void
efc_vport_update_spec(struct efc_nport *nport)
{
	struct efc *efc = nport->efc;
	struct efc_vport *vport;
	unsigned long flags = 0;

	spin_lock_irqsave(&efc->vport_lock, flags);
	list_for_each_entry(vport, &efc->vport_list, list_entry) {
		if (vport->nport == nport) {
			vport->wwnn = nport->wwnn;
			vport->wwpn = nport->wwpn;
			vport->tgt_data = nport->tgt_data;
			vport->ini_data = nport->ini_data;
			break;
		}
	}
	spin_unlock_irqrestore(&efc->vport_lock, flags);
}

void
__efc_nport_attached(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_ENTER: {
		struct efc_node *node;
		unsigned long index;

		efc_log_debug(efc,
			      "[%s] NPORT attached WWPN %016llX WWNN %016llX\n",
			      nport->display_name,
			      nport->wwpn, nport->wwnn);

		xa_for_each(&nport->lookup, index, node)
			efc_node_update_display_name(node);

		efc->tt.new_nport(efc, nport);

		/*
		 * Update the vport (if it's not the physical nport)
		 * parameters
		 */
		if (nport->is_vport)
			efc_vport_update_spec(nport);
		break;
	}

	case EFC_EVT_EXIT:
		efc_log_debug(efc,
			      "[%s] NPORT detached WWPN %016llX WWNN %016llX\n",
			      nport->display_name,
			      nport->wwpn, nport->wwnn);

		efc->tt.del_nport(efc, nport);
		break;
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}

void
__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;
	struct efc_domain *domain = nport->domain;
	struct efc *efc = nport->efc;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_NPORT_ALLOC_OK:
	case EFC_EVT_NPORT_ALLOC_FAIL:
	case EFC_EVT_NPORT_ATTACH_OK:
	case EFC_EVT_NPORT_ATTACH_FAIL:
		/* ignore these events - just wait for the all free event */
		break;

	case EFC_EVT_ALL_CHILD_NODES_FREE: {
		/*
		 * Remove the nport from the domain's
		 * sparse vector lookup table
		 */
		xa_erase(&domain->lookup, nport->fc_id);
		efc_sm_transition(ctx, __efc_nport_wait_port_free, NULL);
		if (efc_cmd_nport_free(efc, nport)) {
			efc_log_err(nport->efc, "efc_hw_port_free failed\n");
			/* Not much we can do, free the nport anyways */
			efc_nport_free(nport);
		}
		break;
	}
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}

void
__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg)
{
	struct efc_nport *nport = ctx->app;

	nport_sm_trace(nport);

	switch (evt) {
	case EFC_EVT_NPORT_ATTACH_OK:
		/* Ignore as we are waiting for the free CB */
		break;
	case EFC_EVT_NPORT_FREE_OK: {
		/* All done, free myself */
		efc_nport_free(nport);
		break;
	}
	default:
		__efc_nport_common(__func__, ctx, evt, arg);
	}
}

static int
efc_vport_nport_alloc(struct efc_domain *domain, struct efc_vport *vport)
{
	struct efc_nport *nport;

	lockdep_assert_held(&domain->efc->lock);

	nport = efc_nport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
				vport->enable_ini, vport->enable_tgt);
	vport->nport = nport;
	if (!nport)
		return -EIO;

	kref_get(&nport->ref);
	nport->is_vport = true;
	nport->tgt_data = vport->tgt_data;
	nport->ini_data = vport->ini_data;

	efc_sm_transition(&nport->sm, __efc_nport_vport_init, NULL);

	return 0;
}

int
efc_vport_start(struct efc_domain *domain)
{
	struct efc *efc = domain->efc;
	struct efc_vport *vport;
	struct efc_vport *next;
	int rc = 0;
	unsigned long flags = 0;

	/* Use the vport spec to find the associated vports and start them */
	spin_lock_irqsave(&efc->vport_lock, flags);
	list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
		if (!vport->nport) {
			if (efc_vport_nport_alloc(domain, vport))
				rc = -EIO;
		}
	}
	spin_unlock_irqrestore(&efc->vport_lock, flags);

	return rc;
}

int
efc_nport_vport_new(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
		    u32 fc_id, bool ini, bool tgt, void *tgt_data,
		    void *ini_data)
{
	struct efc *efc = domain->efc;
	struct efc_vport *vport;
	int rc = 0;
	unsigned long flags = 0;

	if (ini && domain->efc->enable_ini == 0) {
		efc_log_debug(efc, "driver initiator mode not enabled\n");
		return -EIO;
	}

	if (tgt && domain->efc->enable_tgt == 0) {
		efc_log_debug(efc, "driver target mode not enabled\n");
		return -EIO;
	}

	/*
	 * Create a vport spec if we need to recreate
	 * this vport after a link up event
	 */
	vport = efc_vport_create_spec(domain->efc, wwnn, wwpn, fc_id, ini, tgt,
				      tgt_data, ini_data);
	if (!vport) {
		efc_log_err(efc, "failed to create vport object entry\n");
		return -EIO;
	}

	spin_lock_irqsave(&efc->lock, flags);
	rc = efc_vport_nport_alloc(domain, vport);
	spin_unlock_irqrestore(&efc->lock, flags);

	return rc;
}

int
efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
		    u64 wwpn, uint64_t wwnn)
{
	struct efc_nport *nport;
	struct efc_vport *vport;
	struct efc_vport *next;
	unsigned long flags = 0;

	spin_lock_irqsave(&efc->vport_lock, flags);
	/* walk the efc_vport_list and remove from there */
	list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
		if (vport->wwpn == wwpn && vport->wwnn == wwnn) {
			list_del(&vport->list_entry);
			kfree(vport);
			break;
		}
	}
	spin_unlock_irqrestore(&efc->vport_lock, flags);

	if (!domain) {
		/* No domain means no nport to look for */
		return 0;
	}

	spin_lock_irqsave(&efc->lock, flags);
	list_for_each_entry(nport, &domain->nport_list, list_entry) {
		if (nport->wwpn == wwpn && nport->wwnn == wwnn) {
			kref_put(&nport->ref, nport->release);
			/* Shutdown this NPORT */
			efc_sm_post_event(&nport->sm, EFC_EVT_SHUTDOWN, NULL);
			break;
		}
	}

	spin_unlock_irqrestore(&efc->lock, flags);
	return 0;
}

void
efc_vport_del_all(struct efc *efc)
{
	struct efc_vport *vport;
	struct efc_vport *next;
	unsigned long flags = 0;

	spin_lock_irqsave(&efc->vport_lock, flags);
	list_for_each_entry_safe(vport, next, &efc->vport_list, list_entry) {
		list_del(&vport->list_entry);
		kfree(vport);
	}
	spin_unlock_irqrestore(&efc->vport_lock, flags);
}

struct efc_vport *
efc_vport_create_spec(struct efc *efc, uint64_t wwnn, uint64_t wwpn,
		      u32 fc_id, bool enable_ini,
		      bool enable_tgt, void *tgt_data, void *ini_data)
{
	struct efc_vport *vport;
	unsigned long flags = 0;

	/*
	 * walk the efc_vport_list and return failure
	 * if a valid (vport with non-zero WWPN and WWNN) vport entry
	 * is already created
	 */
	spin_lock_irqsave(&efc->vport_lock, flags);
	list_for_each_entry(vport, &efc->vport_list, list_entry) {
		if ((wwpn && vport->wwpn == wwpn) &&
		    (wwnn && vport->wwnn == wwnn)) {
			efc_log_err(efc,
				    "VPORT %016llX %016llX already allocated\n",
				    wwnn, wwpn);
			spin_unlock_irqrestore(&efc->vport_lock, flags);
			return NULL;
		}
	}

	vport = kzalloc(sizeof(*vport), GFP_ATOMIC);
	if (!vport) {
		spin_unlock_irqrestore(&efc->vport_lock, flags);
		return NULL;
	}

	vport->wwnn = wwnn;
	vport->wwpn = wwpn;
	vport->fc_id = fc_id;
	vport->enable_tgt = enable_tgt;
	vport->enable_ini = enable_ini;
	vport->tgt_data = tgt_data;
	vport->ini_data = ini_data;

	INIT_LIST_HEAD(&vport->list_entry);
	list_add_tail(&vport->list_entry, &efc->vport_list);
	spin_unlock_irqrestore(&efc->vport_lock, flags);
	return vport;
}

@@ -0,0 +1,50 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/**
 * EFC FC port (NPORT) exported declarations
 */

#ifndef __EFC_NPORT_H__
#define __EFC_NPORT_H__

struct efc_nport *
efc_nport_find(struct efc_domain *domain, u32 d_id);
struct efc_nport *
efc_nport_alloc(struct efc_domain *domain, uint64_t wwpn, uint64_t wwnn,
		u32 fc_id, bool enable_ini, bool enable_tgt);
void
efc_nport_free(struct efc_nport *nport);
int
efc_nport_attach(struct efc_nport *nport, u32 fc_id);

void
__efc_nport_allocated(struct efc_sm_ctx *ctx,
		      enum efc_sm_event evt, void *arg);
void
__efc_nport_wait_shutdown(struct efc_sm_ctx *ctx,
			  enum efc_sm_event evt, void *arg);
void
__efc_nport_wait_port_free(struct efc_sm_ctx *ctx,
			   enum efc_sm_event evt, void *arg);
void
__efc_nport_vport_init(struct efc_sm_ctx *ctx,
		       enum efc_sm_event evt, void *arg);
void
__efc_nport_vport_wait_alloc(struct efc_sm_ctx *ctx,
			     enum efc_sm_event evt, void *arg);
void
__efc_nport_vport_allocated(struct efc_sm_ctx *ctx,
			    enum efc_sm_event evt, void *arg);
void
__efc_nport_attached(struct efc_sm_ctx *ctx,
		     enum efc_sm_event evt, void *arg);

int
efc_vport_start(struct efc_domain *domain);

#endif /* __EFC_NPORT_H__ */

@@ -0,0 +1,54 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * Generic state machine framework.
 */
#include "efc.h"
#include "efc_sm.h"

/**
 * efc_sm_post_event() - Post an event to a context.
 *
 * @ctx: State machine context
 * @evt: Event to post
 * @data: Event-specific data (if any)
 */
int
efc_sm_post_event(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *data)
{
	if (!ctx->current_state)
		return -EIO;

	ctx->current_state(ctx, evt, data);
	return 0;
}

void
efc_sm_transition(struct efc_sm_ctx *ctx,
		  void (*state)(struct efc_sm_ctx *,
				enum efc_sm_event, void *), void *data)
{
	if (ctx->current_state == state) {
		efc_sm_post_event(ctx, EFC_EVT_REENTER, data);
	} else {
		efc_sm_post_event(ctx, EFC_EVT_EXIT, data);
		ctx->current_state = state;
		efc_sm_post_event(ctx, EFC_EVT_ENTER, data);
	}
}
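
/*
 * Illustrative sketch (not part of the driver): a transition to a new
 * state delivers EFC_EVT_EXIT to the old handler, swaps the function
 * pointer, then delivers EFC_EVT_ENTER to the new one; a transition to
 * the current state delivers only EFC_EVT_REENTER. With hypothetical
 * state handlers A and B:
 *
 *	efc_sm_transition(ctx, B, NULL);  // A sees EXIT, B sees ENTER
 *	efc_sm_transition(ctx, B, NULL);  // B sees REENTER
 *
 * This is why every state handler must at least tolerate the three
 * housekeeping events (ENTER, REENTER, EXIT).
 */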

static char *event_name[] = EFC_SM_EVENT_NAME;

const char *efc_sm_event_name(enum efc_sm_event evt)
{
	if (evt > EFC_EVT_LAST)
		return "unknown";

	return event_name[evt];
}

@@ -0,0 +1,197 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/**
 * Generic state machine framework declarations.
 */

#ifndef _EFC_SM_H
#define _EFC_SM_H

struct efc_sm_ctx;

/* State Machine events */
enum efc_sm_event {
	/* Common Events */
	EFC_EVT_ENTER,
	EFC_EVT_REENTER,
	EFC_EVT_EXIT,
	EFC_EVT_SHUTDOWN,
	EFC_EVT_ALL_CHILD_NODES_FREE,
	EFC_EVT_RESUME,
	EFC_EVT_TIMER_EXPIRED,

	/* Domain Events */
	EFC_EVT_RESPONSE,
	EFC_EVT_ERROR,

	EFC_EVT_DOMAIN_FOUND,
	EFC_EVT_DOMAIN_ALLOC_OK,
	EFC_EVT_DOMAIN_ALLOC_FAIL,
	EFC_EVT_DOMAIN_REQ_ATTACH,
	EFC_EVT_DOMAIN_ATTACH_OK,
	EFC_EVT_DOMAIN_ATTACH_FAIL,
	EFC_EVT_DOMAIN_LOST,
	EFC_EVT_DOMAIN_FREE_OK,
	EFC_EVT_DOMAIN_FREE_FAIL,
	EFC_EVT_HW_DOMAIN_REQ_ATTACH,
	EFC_EVT_HW_DOMAIN_REQ_FREE,

	/* Sport Events */
	EFC_EVT_NPORT_ALLOC_OK,
	EFC_EVT_NPORT_ALLOC_FAIL,
	EFC_EVT_NPORT_ATTACH_OK,
	EFC_EVT_NPORT_ATTACH_FAIL,
	EFC_EVT_NPORT_FREE_OK,
	EFC_EVT_NPORT_FREE_FAIL,
	EFC_EVT_NPORT_TOPOLOGY_NOTIFY,
	EFC_EVT_HW_PORT_ALLOC_OK,
	EFC_EVT_HW_PORT_ALLOC_FAIL,
	EFC_EVT_HW_PORT_ATTACH_OK,
	EFC_EVT_HW_PORT_REQ_ATTACH,
	EFC_EVT_HW_PORT_REQ_FREE,
	EFC_EVT_HW_PORT_FREE_OK,

	/* Login Events */
	EFC_EVT_SRRS_ELS_REQ_OK,
	EFC_EVT_SRRS_ELS_CMPL_OK,
	EFC_EVT_SRRS_ELS_REQ_FAIL,
	EFC_EVT_SRRS_ELS_CMPL_FAIL,
	EFC_EVT_SRRS_ELS_REQ_RJT,
	EFC_EVT_NODE_ATTACH_OK,
	EFC_EVT_NODE_ATTACH_FAIL,
	EFC_EVT_NODE_FREE_OK,
	EFC_EVT_NODE_FREE_FAIL,
	EFC_EVT_ELS_FRAME,
	EFC_EVT_ELS_REQ_TIMEOUT,
	EFC_EVT_ELS_REQ_ABORTED,
	/* request an ELS IO be aborted */
	EFC_EVT_ABORT_ELS,
	/* ELS abort process complete */
	EFC_EVT_ELS_ABORT_CMPL,

	EFC_EVT_ABTS_RCVD,

	/* node is not in the GID_PT payload */
	EFC_EVT_NODE_MISSING,
	/* node is allocated and in the GID_PT payload */
	EFC_EVT_NODE_REFOUND,
	/* node shutting down due to PLOGI recvd (implicit logo) */
	EFC_EVT_SHUTDOWN_IMPLICIT_LOGO,
	/* node shutting down due to LOGO recvd/sent (explicit logo) */
	EFC_EVT_SHUTDOWN_EXPLICIT_LOGO,

	EFC_EVT_PLOGI_RCVD,
	EFC_EVT_FLOGI_RCVD,
	EFC_EVT_LOGO_RCVD,
	EFC_EVT_PRLI_RCVD,
	EFC_EVT_PRLO_RCVD,
	EFC_EVT_PDISC_RCVD,
	EFC_EVT_FDISC_RCVD,
	EFC_EVT_ADISC_RCVD,
	EFC_EVT_RSCN_RCVD,
	EFC_EVT_SCR_RCVD,
	EFC_EVT_ELS_RCVD,

	EFC_EVT_FCP_CMD_RCVD,

	EFC_EVT_GIDPT_DELAY_EXPIRED,

	/* SCSI Target Server events */
	EFC_EVT_NODE_ACTIVE_IO_LIST_EMPTY,
	EFC_EVT_NODE_DEL_INI_COMPLETE,
	EFC_EVT_NODE_DEL_TGT_COMPLETE,
	EFC_EVT_NODE_SESS_REG_OK,
	EFC_EVT_NODE_SESS_REG_FAIL,

	/* Must be last */
	EFC_EVT_LAST
};

/* State Machine event name lookup array */
#define EFC_SM_EVENT_NAME { \
	[EFC_EVT_ENTER] = "EFC_EVT_ENTER", \
	[EFC_EVT_REENTER] = "EFC_EVT_REENTER", \
	[EFC_EVT_EXIT] = "EFC_EVT_EXIT", \
	[EFC_EVT_SHUTDOWN] = "EFC_EVT_SHUTDOWN", \
	[EFC_EVT_ALL_CHILD_NODES_FREE] = "EFC_EVT_ALL_CHILD_NODES_FREE",\
	[EFC_EVT_RESUME] = "EFC_EVT_RESUME", \
	[EFC_EVT_TIMER_EXPIRED] = "EFC_EVT_TIMER_EXPIRED", \
	[EFC_EVT_RESPONSE] = "EFC_EVT_RESPONSE", \
	[EFC_EVT_ERROR] = "EFC_EVT_ERROR", \
	[EFC_EVT_DOMAIN_FOUND] = "EFC_EVT_DOMAIN_FOUND", \
	[EFC_EVT_DOMAIN_ALLOC_OK] = "EFC_EVT_DOMAIN_ALLOC_OK", \
	[EFC_EVT_DOMAIN_ALLOC_FAIL] = "EFC_EVT_DOMAIN_ALLOC_FAIL", \
	[EFC_EVT_DOMAIN_REQ_ATTACH] = "EFC_EVT_DOMAIN_REQ_ATTACH", \
	[EFC_EVT_DOMAIN_ATTACH_OK] = "EFC_EVT_DOMAIN_ATTACH_OK", \
	[EFC_EVT_DOMAIN_ATTACH_FAIL] = "EFC_EVT_DOMAIN_ATTACH_FAIL", \
	[EFC_EVT_DOMAIN_LOST] = "EFC_EVT_DOMAIN_LOST", \
	[EFC_EVT_DOMAIN_FREE_OK] = "EFC_EVT_DOMAIN_FREE_OK", \
	[EFC_EVT_DOMAIN_FREE_FAIL] = "EFC_EVT_DOMAIN_FREE_FAIL", \
	[EFC_EVT_HW_DOMAIN_REQ_ATTACH] = "EFC_EVT_HW_DOMAIN_REQ_ATTACH",\
	[EFC_EVT_HW_DOMAIN_REQ_FREE] = "EFC_EVT_HW_DOMAIN_REQ_FREE", \
	[EFC_EVT_NPORT_ALLOC_OK] = "EFC_EVT_NPORT_ALLOC_OK", \
	[EFC_EVT_NPORT_ALLOC_FAIL] = "EFC_EVT_NPORT_ALLOC_FAIL", \
	[EFC_EVT_NPORT_ATTACH_OK] = "EFC_EVT_NPORT_ATTACH_OK", \
	[EFC_EVT_NPORT_ATTACH_FAIL] = "EFC_EVT_NPORT_ATTACH_FAIL", \
	[EFC_EVT_NPORT_FREE_OK] = "EFC_EVT_NPORT_FREE_OK", \
	[EFC_EVT_NPORT_FREE_FAIL] = "EFC_EVT_NPORT_FREE_FAIL", \
	[EFC_EVT_NPORT_TOPOLOGY_NOTIFY] = "EFC_EVT_NPORT_TOPOLOGY_NOTIFY",\
	[EFC_EVT_HW_PORT_ALLOC_OK] = "EFC_EVT_HW_PORT_ALLOC_OK", \
	[EFC_EVT_HW_PORT_ALLOC_FAIL] = "EFC_EVT_HW_PORT_ALLOC_FAIL", \
	[EFC_EVT_HW_PORT_ATTACH_OK] = "EFC_EVT_HW_PORT_ATTACH_OK", \
	[EFC_EVT_HW_PORT_REQ_ATTACH] = "EFC_EVT_HW_PORT_REQ_ATTACH", \
	[EFC_EVT_HW_PORT_REQ_FREE] = "EFC_EVT_HW_PORT_REQ_FREE", \
	[EFC_EVT_HW_PORT_FREE_OK] = "EFC_EVT_HW_PORT_FREE_OK", \
	[EFC_EVT_SRRS_ELS_REQ_OK] = "EFC_EVT_SRRS_ELS_REQ_OK", \
	[EFC_EVT_SRRS_ELS_CMPL_OK] = "EFC_EVT_SRRS_ELS_CMPL_OK", \
	[EFC_EVT_SRRS_ELS_REQ_FAIL] = "EFC_EVT_SRRS_ELS_REQ_FAIL", \
	[EFC_EVT_SRRS_ELS_CMPL_FAIL] = "EFC_EVT_SRRS_ELS_CMPL_FAIL", \
	[EFC_EVT_SRRS_ELS_REQ_RJT] = "EFC_EVT_SRRS_ELS_REQ_RJT", \
	[EFC_EVT_NODE_ATTACH_OK] = "EFC_EVT_NODE_ATTACH_OK", \
	[EFC_EVT_NODE_ATTACH_FAIL] = "EFC_EVT_NODE_ATTACH_FAIL", \
	[EFC_EVT_NODE_FREE_OK] = "EFC_EVT_NODE_FREE_OK", \
	[EFC_EVT_NODE_FREE_FAIL] = "EFC_EVT_NODE_FREE_FAIL", \
	[EFC_EVT_ELS_FRAME] = "EFC_EVT_ELS_FRAME", \
	[EFC_EVT_ELS_REQ_TIMEOUT] = "EFC_EVT_ELS_REQ_TIMEOUT", \
	[EFC_EVT_ELS_REQ_ABORTED] = "EFC_EVT_ELS_REQ_ABORTED", \
	[EFC_EVT_ABORT_ELS] = "EFC_EVT_ABORT_ELS", \
	[EFC_EVT_ELS_ABORT_CMPL] = "EFC_EVT_ELS_ABORT_CMPL", \
	[EFC_EVT_ABTS_RCVD] = "EFC_EVT_ABTS_RCVD", \
	[EFC_EVT_NODE_MISSING] = "EFC_EVT_NODE_MISSING", \
	[EFC_EVT_NODE_REFOUND] = "EFC_EVT_NODE_REFOUND", \
	[EFC_EVT_SHUTDOWN_IMPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_IMPLICIT_LOGO",\
	[EFC_EVT_SHUTDOWN_EXPLICIT_LOGO] = "EFC_EVT_SHUTDOWN_EXPLICIT_LOGO",\
	[EFC_EVT_PLOGI_RCVD] = "EFC_EVT_PLOGI_RCVD", \
	[EFC_EVT_FLOGI_RCVD] = "EFC_EVT_FLOGI_RCVD", \
	[EFC_EVT_LOGO_RCVD] = "EFC_EVT_LOGO_RCVD", \
	[EFC_EVT_PRLI_RCVD] = "EFC_EVT_PRLI_RCVD", \
	[EFC_EVT_PRLO_RCVD] = "EFC_EVT_PRLO_RCVD", \
	[EFC_EVT_PDISC_RCVD] = "EFC_EVT_PDISC_RCVD", \
	[EFC_EVT_FDISC_RCVD] = "EFC_EVT_FDISC_RCVD", \
	[EFC_EVT_ADISC_RCVD] = "EFC_EVT_ADISC_RCVD", \
	[EFC_EVT_RSCN_RCVD] = "EFC_EVT_RSCN_RCVD", \
	[EFC_EVT_SCR_RCVD] = "EFC_EVT_SCR_RCVD", \
	[EFC_EVT_ELS_RCVD] = "EFC_EVT_ELS_RCVD", \
	[EFC_EVT_FCP_CMD_RCVD] = "EFC_EVT_FCP_CMD_RCVD", \
	[EFC_EVT_NODE_DEL_INI_COMPLETE] = "EFC_EVT_NODE_DEL_INI_COMPLETE",\
	[EFC_EVT_NODE_DEL_TGT_COMPLETE] = "EFC_EVT_NODE_DEL_TGT_COMPLETE",\
	[EFC_EVT_LAST] = "EFC_EVT_LAST", \
}
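
/*
 * Illustrative sketch (not part of the driver): the table above uses C99
 * designated initializers, so entries are indexed by enum value and the
 * table stays correct even if entries are listed out of order. The same
 * pattern in miniature:
 *
 *	enum color { RED, GREEN, BLUE, COLOR_LAST };
 *	static const char *color_name[] = {
 *		[RED] = "RED", [GREEN] = "GREEN", [BLUE] = "BLUE",
 *	};
 *
 * Enum values without an initializer (e.g. EFC_EVT_GIDPT_DELAY_EXPIRED
 * above) simply read back as NULL from the array.
 */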

int
efc_sm_post_event(struct efc_sm_ctx *ctx,
		  enum efc_sm_event evt, void *data);
void
efc_sm_transition(struct efc_sm_ctx *ctx,
		  void (*state)(struct efc_sm_ctx *ctx,
				enum efc_sm_event evt, void *arg),
		  void *data);
void efc_sm_disable(struct efc_sm_ctx *ctx);
const char *efc_sm_event_name(enum efc_sm_event evt);

#endif /* ! _EFC_SM_H */

@@ -0,0 +1,81 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

/*
 * LIBEFC LOCKING
 *
 * The critical sections protected by the efc's spinlock are quite broad and
 * may be improved upon in the future. The libefc code and its locking don't
 * influence the I/O path, so excessive locking doesn't impact I/O performance.
 *
 * The strategy is to lock whenever processing a request from the user driver.
 * This means that the entry points into the libefc library are protected by
 * the efc lock. So all the state machine transitions are protected.
 */
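
/*
 * Illustrative sketch (not part of the driver): "lock at the entry
 * points" concretely means every callback from the base driver takes
 * efc->lock around the state machine dispatch, as efc_nport_cb() does:
 *
 *	spin_lock_irqsave(&efc->lock, flags);
 *	efc_sm_post_event(&nport->sm, event, NULL);
 *	spin_unlock_irqrestore(&efc->lock, flags);
 *
 * The handlers themselves then run without taking the lock again, and
 * internal helpers such as efc_vport_nport_alloc() document the
 * requirement with lockdep_assert_held().
 */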

#include <linux/module.h>
#include <linux/kernel.h>
#include "efc.h"

int efcport_init(struct efc *efc)
{
	u32 rc = 0;

	spin_lock_init(&efc->lock);
	INIT_LIST_HEAD(&efc->vport_list);
	efc->hold_frames = false;
	spin_lock_init(&efc->pend_frames_lock);
	INIT_LIST_HEAD(&efc->pend_frames);

	/* Create Node pool */
	efc->node_pool = mempool_create_kmalloc_pool(EFC_MAX_REMOTE_NODES,
						     sizeof(struct efc_node));
	if (!efc->node_pool) {
		efc_log_err(efc, "Can't allocate node pool\n");
		return -ENOMEM;
	}

	efc->node_dma_pool = dma_pool_create("node_dma_pool", &efc->pci->dev,
					     NODE_SPARAMS_SIZE, 0, 0);
	if (!efc->node_dma_pool) {
		efc_log_err(efc, "Can't allocate node dma pool\n");
		mempool_destroy(efc->node_pool);
		return -ENOMEM;
	}

	efc->els_io_pool = mempool_create_kmalloc_pool(EFC_ELS_IO_POOL_SZ,
						       sizeof(struct efc_els_io_req));
	if (!efc->els_io_pool) {
		efc_log_err(efc, "Can't allocate els io pool\n");
		return -ENOMEM;
	}

	return rc;
}
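
/*
 * Illustrative sketch (not part of the driver): the pools created above
 * guarantee a minimum number of preallocated objects, so allocations in
 * the discovery path can make progress under memory pressure. Typical
 * use of such a pool would look like:
 *
 *	struct efc_node *node = mempool_alloc(efc->node_pool, GFP_ATOMIC);
 *
 *	if (node) {
 *		memset(node, 0, sizeof(*node));
 *		...
 *		mempool_free(node, efc->node_pool);
 *	}
 *
 * Note that efcport_init() as written does not free node_pool and
 * node_dma_pool when the final els_io_pool allocation fails;
 * efcport_destroy() only cleans up after a fully successful init.
 */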

static void
efc_purge_pending(struct efc *efc)
{
	struct efc_hw_sequence *frame, *next;
	unsigned long flags = 0;

	spin_lock_irqsave(&efc->pend_frames_lock, flags);

	list_for_each_entry_safe(frame, next, &efc->pend_frames, list_entry) {
		list_del(&frame->list_entry);
		efc->tt.hw_seq_free(efc, frame);
	}

	spin_unlock_irqrestore(&efc->pend_frames_lock, flags);
}

void efcport_destroy(struct efc *efc)
{
	efc_purge_pending(efc);
	mempool_destroy(efc->els_io_pool);
	mempool_destroy(efc->node_pool);
	dma_pool_destroy(efc->node_dma_pool);
}

@@ -0,0 +1,620 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2021 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 */

#ifndef __EFCLIB_H__
#define __EFCLIB_H__

#include "scsi/fc/fc_els.h"
#include "scsi/fc/fc_fs.h"
#include "scsi/fc/fc_ns.h"
#include "scsi/fc/fc_gs.h"
#include "scsi/fc_frame.h"
#include "../include/efc_common.h"
#include "../libefc_sli/sli4.h"

#define EFC_SERVICE_PARMS_LENGTH	120
#define EFC_NAME_LENGTH			32
#define EFC_SM_NAME_LENGTH		64
#define EFC_DISPLAY_BUS_INFO_LENGTH	16

#define EFC_WWN_LENGTH			32

#define EFC_FC_ELS_DEFAULT_RETRIES	3

/* Timeouts */
#define EFC_FC_ELS_SEND_DEFAULT_TIMEOUT	0
#define EFC_FC_FLOGI_TIMEOUT_SEC	5
#define EFC_SHUTDOWN_TIMEOUT_USEC	30000000

/* Return values for calls from base driver to libefc */
#define EFC_SCSI_CALL_COMPLETE		0
#define EFC_SCSI_CALL_ASYNC		1

/* Local port topology */
enum efc_nport_topology {
	EFC_NPORT_TOPO_UNKNOWN = 0,
	EFC_NPORT_TOPO_FABRIC,
	EFC_NPORT_TOPO_P2P,
	EFC_NPORT_TOPO_FC_AL,
};

#define enable_target_rscn(efc)	1

enum efc_node_shutd_rsn {
	EFC_NODE_SHUTDOWN_DEFAULT = 0,
	EFC_NODE_SHUTDOWN_EXPLICIT_LOGO,
	EFC_NODE_SHUTDOWN_IMPLICIT_LOGO,
};

enum efc_node_send_ls_acc {
	EFC_NODE_SEND_LS_ACC_NONE = 0,
	EFC_NODE_SEND_LS_ACC_PLOGI,
	EFC_NODE_SEND_LS_ACC_PRLI,
};

#define EFC_LINK_STATUS_UP	0
#define EFC_LINK_STATUS_DOWN	1

/* State machine context header */
struct efc_sm_ctx {
	void (*current_state)(struct efc_sm_ctx *ctx,
			      u32 evt, void *arg);

	const char	*description;
	void		*app;
};

/* Description of discovered Fabric Domain */
struct efc_domain_record {
	u32	index;
	u32	priority;
	u8	address[6];
	u8	wwn[8];
	union {
		u8	vlan[512];
		u8	loop[128];
	} map;
	u32	speed;
	u32	fc_id;
	bool	is_loop;
	bool	is_nport;
};

/* Domain events */
enum efc_hw_domain_event {
	EFC_HW_DOMAIN_ALLOC_OK,
	EFC_HW_DOMAIN_ALLOC_FAIL,
	EFC_HW_DOMAIN_ATTACH_OK,
	EFC_HW_DOMAIN_ATTACH_FAIL,
	EFC_HW_DOMAIN_FREE_OK,
	EFC_HW_DOMAIN_FREE_FAIL,
	EFC_HW_DOMAIN_LOST,
	EFC_HW_DOMAIN_FOUND,
	EFC_HW_DOMAIN_CHANGED,
};

/**
 * Fibre Channel port object
 *
 * @list_entry:		nport list entry
 * @ref:		reference count, each node takes a reference
 * @release:		function to free nport object
 * @efc:		pointer back to efc
 * @instance_index:	unique instance index value
 * @display_name:	port display name
 * @is_vport:		Is NPIV port
 * @free_req_pending:	pending request to free resources
 * @attached:		mark attached if reg VPI succeeds
 * @p2p_winner:		TRUE if we're the point-to-point winner
 * @domain:		pointer back to domain
 * @wwpn:		port wwpn
 * @wwnn:		port wwnn
 * @tgt_data:		target backend private port data
 * @ini_data:		initiator backend private port data
 * @indicator:		VPI
 * @fc_id:		port FC address
 * @dma:		memory for Service Parameters
 * @wwnn_str:		wwnn string
 * @sli_wwpn:		SLI provided wwpn
 * @sli_wwnn:		SLI provided wwnn
 * @sm:			nport state machine context
 * @lookup:		fc_id to node lookup object
 * @enable_ini:		SCSI initiator enabled for this port
 * @enable_tgt:		SCSI target enabled for this port
 * @enable_rscn:	port will be expecting RSCN
 * @shutting_down:	nport in process of shutting down
 * @p2p_port_id:	our port id for point-to-point
 * @topology:		topology: fabric/p2p/unknown
 * @service_params:	login parameters
 * @p2p_remote_port_id:	remote node's port id for point-to-point
 */

struct efc_nport {
	struct list_head	list_entry;
	struct kref		ref;
	void			(*release)(struct kref *arg);
	struct efc		*efc;
	u32			instance_index;
	char			display_name[EFC_NAME_LENGTH];
	bool			is_vport;
	bool			free_req_pending;
	bool			attached;
	bool			p2p_winner;
	struct efc_domain	*domain;
	u64			wwpn;
	u64			wwnn;
	void			*tgt_data;
	void			*ini_data;

	u32			indicator;
	u32			fc_id;
	struct efc_dma		dma;

	u8			wwnn_str[EFC_WWN_LENGTH];
	__be64			sli_wwpn;
	__be64			sli_wwnn;

	struct efc_sm_ctx	sm;
	struct xarray		lookup;
	bool			enable_ini;
	bool			enable_tgt;
	bool			enable_rscn;
	bool			shutting_down;
	u32			p2p_port_id;
	enum efc_nport_topology	topology;
	u8			service_params[EFC_SERVICE_PARMS_LENGTH];
	u32			p2p_remote_port_id;
};

/**
 * Fibre Channel domain object
 *
 * This object is a container for the various SLI components needed
 * to connect to the domain of a FC or FCoE switch
 * @efc:		pointer back to efc
 * @instance_index:	unique instance index value
 * @display_name:	Node display name
 * @nport_list:		linked list of nports associated with this domain
 * @ref:		Reference count, each nport takes a reference
 * @release:		Function to free domain object
 * @ini_domain:		initiator backend private domain data
 * @tgt_domain:		target backend private domain data
 * @sm:			state machine context
 * @fcf:		FC Forwarder table index
 * @fcf_indicator:	FCFI
 * @indicator:		VFI
 * @nport_count:	Number of nports allocated
 * @dma:		memory for Service Parameters
 * @fcf_wwn:		WWN for FCF/switch
 * @drvsm:		driver domain sm context
 * @attached:		set true after attach completes
 * @is_fc:		is FC
 * @is_loop:		is loop topology
 * @is_nlport:		is public loop
 * @domain_found_pending: A domain found is pending, drec is updated
 * @req_domain_free:	True if domain object should be free'd
 * @req_accept_frames:	set in domain state machine to enable frames
 * @domain_notify_pend:	Set in domain SM to avoid duplicate node event post
 * @pending_drec:	Pending drec if a domain found is pending
 * @service_params:	any nports service parameters
 * @flogi_service_params: Fabric/P2p service parameters from FLOGI
 * @lookup:		d_id to node lookup object
 * @nport:		Pointer to first (physical) SLI port
 */
struct efc_domain {
	struct efc		*efc;
	char			display_name[EFC_NAME_LENGTH];
	struct list_head	nport_list;
	struct kref		ref;
	void			(*release)(struct kref *arg);
	void			*ini_domain;
	void			*tgt_domain;

	/* Declarations private to HW/SLI */
	u32			fcf;
	u32			fcf_indicator;
	u32			indicator;
	u32			nport_count;
	struct efc_dma		dma;

	/* Declarations private to FC transport */
	u64			fcf_wwn;
	struct efc_sm_ctx	drvsm;
	bool			attached;
	bool			is_fc;
	bool			is_loop;
	bool			is_nlport;
	bool			domain_found_pending;
	bool			req_domain_free;
	bool			req_accept_frames;
	bool			domain_notify_pend;

	struct efc_domain_record pending_drec;
	u8			service_params[EFC_SERVICE_PARMS_LENGTH];
	u8			flogi_service_params[EFC_SERVICE_PARMS_LENGTH];

	struct xarray		lookup;

	struct efc_nport	*nport;
};

/**
 * Remote Node object
 *
 * This object represents a connection between the SLI port and another
 * Nx_Port on the fabric. Note this can be either a well known port such
 * as a F_Port (i.e. ff:ff:fe) or another N_Port.
 * @indicator:	RPI
 * @fc_id:	FC address
 * @attached:	true if attached
 * @nport:	associated SLI port
 * @node:	associated node
 */
struct efc_remote_node {
	u32			indicator;
	u32			index;
	u32			fc_id;

	bool			attached;

	struct efc_nport	*nport;
	void			*node;
};

/**
 * FC Node object
 * @efc:		pointer back to efc structure
 * @display_name:	Node display name
 * @nport:		Associated nport pointer.
 * @hold_frames:	hold incoming frames if true
 * @els_io_enabled:	Enable allocating els ios for this node
 * @els_ios_lock:	lock to protect the els ios list
 * @els_ios_list:	ELS I/O's for this node
 * @ini_node:		backend initiator private node data
 * @tgt_node:		backend target private node data
 * @rnode:		Remote node
 * @sm:			state machine context
 * @evtdepth:		current event posting nesting depth
 * @req_free:		this node is to be free'd
 * @attached:		node is attached (REGLOGIN complete)
 * @fcp_enabled:	node is enabled to handle FCP
 * @rscn_pending:	for name server node RSCN is pending
 * @send_plogi:		send PLOGI accept, upon completion of node attach
 * @send_plogi_acc:	TRUE if io_alloc() is enabled.
 * @send_ls_acc:	type of LS acc to send
 * @ls_acc_io:		SCSI IO for LS acc
 * @ls_acc_oxid:	OX_ID for pending accept
 * @ls_acc_did:		D_ID for pending accept
 * @shutdown_reason:	reason for node shutdown
 * @sparm_dma_buf:	service parameters buffer
 * @service_params:	plogi/acc frame from remote device
 * @pend_frames_lock:	lock for inbound pending frames list
 * @pend_frames:	inbound pending frames list
 * @pend_frames_processed: count of frames processed in hold frames interval
 * @ox_id_in_use:	used to verify one-at-a-time use of ox_id
 * @els_retries_remaining: for ELS, number of retries remaining
 * @els_req_cnt:	number of outstanding ELS requests
 * @els_cmpl_cnt:	number of outstanding ELS completions
 * @abort_cnt:		Abort counter for debugging purposes
 * @current_state_name:	current node state
 * @prev_state_name:	previous node state
 * @current_evt:	current event
 * @prev_evt:		previous event
 * @targ:		node is target capable
 * @init:		node is init capable
 * @refound:		Handle node refound case when node is being deleted
 * @els_io_pend_list:	list of pending (not yet processed) ELS IOs
 * @els_io_active_list:	list of active (processed) ELS IOs
 * @nodedb_state:	Node debugging, saved state
 * @gidpt_delay_timer:	GIDPT delay timer
 * @time_last_gidpt_msec: Start time of last target RSCN GIDPT
 * @wwnn:		remote port WWNN
 * @wwpn:		remote port WWPN
 */
struct efc_node {
	struct efc		*efc;
	char			display_name[EFC_NAME_LENGTH];
	struct efc_nport	*nport;
	struct kref		ref;
	void			(*release)(struct kref *arg);
	bool			hold_frames;
	bool			els_io_enabled;
	bool			send_plogi_acc;
	bool			send_plogi;
	bool			rscn_pending;
	bool			fcp_enabled;
	bool			attached;
	bool			req_free;

	spinlock_t		els_ios_lock;
	struct list_head	els_ios_list;
	void			*ini_node;
	void			*tgt_node;

	struct efc_remote_node	rnode;
	/* Declarations private to FC transport */
	struct efc_sm_ctx	sm;
	u32			evtdepth;

	enum efc_node_send_ls_acc send_ls_acc;
	void			*ls_acc_io;
	u32			ls_acc_oxid;
	u32			ls_acc_did;
	enum efc_node_shutd_rsn	shutdown_reason;
	bool			targ;
	bool			init;
	bool			refound;
	struct efc_dma		sparm_dma_buf;
	u8			service_params[EFC_SERVICE_PARMS_LENGTH];
	spinlock_t		pend_frames_lock;
	struct list_head	pend_frames;
	u32			pend_frames_processed;
	u32			ox_id_in_use;
	u32			els_retries_remaining;
	u32			els_req_cnt;
	u32			els_cmpl_cnt;
	u32			abort_cnt;

	char			current_state_name[EFC_SM_NAME_LENGTH];
	char			prev_state_name[EFC_SM_NAME_LENGTH];
	int			current_evt;
	int			prev_evt;

	void			(*nodedb_state)(struct efc_sm_ctx *ctx,
						u32 evt, void *arg);
	struct timer_list	gidpt_delay_timer;
	u64			time_last_gidpt_msec;

	char			wwnn[EFC_WWN_LENGTH];
	char			wwpn[EFC_WWN_LENGTH];
};

/**
 * NPIV port
 *
 * Collection of the information required to restore a virtual port across
 * link events
 * @wwnn:	node name
 * @wwpn:	port name
 * @fc_id:	port id
 * @tgt_data:	target backend pointer
 * @ini_data:	initiator backend pointer
 * @nport:	Used to match record after attaching for update
 *
 */

struct efc_vport {
	struct list_head	list_entry;
	u64			wwnn;
	u64			wwpn;
	u32			fc_id;
	bool			enable_tgt;
	bool			enable_ini;
	void			*tgt_data;
	void			*ini_data;
	struct efc_nport	*nport;
};

#define node_printf(node, fmt, args...) \
	efc_log_info(node->efc, "[%s] " fmt, node->display_name, ##args)

/* Node SM IO Context Callback structure */
struct efc_node_cb {
	int			status;
	int			ext_status;
	struct efc_hw_rq_buffer	*header;
	struct efc_hw_rq_buffer	*payload;
	struct efc_dma		els_rsp;

	/* Actual length of data received */
	int			rsp_len;
};

struct efc_hw_rq_buffer {
	u16			rqindex;
	struct efc_dma		dma;
};

/**
 * FC sequence object
 *
 * Defines a general FC sequence object
 * @hw:		HW that owns this sequence
 * @fcfi:	FCFI associated with sequence
 * @header:	Received frame header
 * @payload:	Received frame payload
 * @hw_priv:	HW private context
 */
struct efc_hw_sequence {
	struct list_head	list_entry;
	void			*hw;
	u8			fcfi;
	struct efc_hw_rq_buffer	*header;
	struct efc_hw_rq_buffer	*payload;
	void			*hw_priv;
};

enum efc_disc_io_type {
	EFC_DISC_IO_ELS_REQ,
	EFC_DISC_IO_ELS_RESP,
	EFC_DISC_IO_CT_REQ,
	EFC_DISC_IO_CT_RESP
};

struct efc_io_els_params {
	u32		s_id;
	u16		ox_id;
	u8		timeout;
};

struct efc_io_ct_params {
	u8		r_ctl;
	u8		type;
	u8		df_ctl;
	u8		timeout;
	u16		ox_id;
};

union efc_disc_io_param {
	struct efc_io_els_params els;
	struct efc_io_ct_params ct;
};

struct efc_disc_io {
	struct efc_dma	req;		/* send buffer */
	struct efc_dma	rsp;		/* receive buffer */
	enum efc_disc_io_type io_type;	/* EFC_DISC_IO_TYPE enum */
	u16		xmit_len;	/* Length of els request */
	u16		rsp_len;	/* Max length of rsps to be rcvd */
	u32		rpi;		/* Registered RPI */
	u32		vpi;		/* VPI for this nport */
	u32		s_id;
	u32		d_id;
	bool		rpi_registered;	/* if false, use tmp RPI */
	union efc_disc_io_param iparam;
};

/* Return value indicating the sequence can not be freed */
#define EFC_HW_SEQ_HOLD		0
/* Return value indicating the sequence can be freed */
#define EFC_HW_SEQ_FREE		1

struct libefc_function_template {
	/* Sport */
	int (*new_nport)(struct efc *efc, struct efc_nport *sp);
	void (*del_nport)(struct efc *efc, struct efc_nport *sp);

	/* SCSI Node */
	int (*scsi_new_node)(struct efc *efc, struct efc_node *n);
	int (*scsi_del_node)(struct efc *efc, struct efc_node *n, int reason);

	int (*issue_mbox_rqst)(void *efct, void *buf, void *cb, void *arg);
	/* Send ELS IO */
	int (*send_els)(struct efc *efc, struct efc_disc_io *io);
	/* Send BLS IO */
	int (*send_bls)(struct efc *efc, u32 type, struct sli_bls_params *bls);
	/* Free HW frame */
	int (*hw_seq_free)(struct efc *efc, struct efc_hw_sequence *seq);
};
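
/*
 * Illustrative sketch (not part of the driver): a base driver wires
 * itself to libefc by filling in this ops table before efcport_init().
 * The names here are hypothetical:
 *
 *	static struct libefc_function_template my_efc_ops = {
 *		.new_nport	= my_new_nport,
 *		.del_nport	= my_del_nport,
 *		.send_els	= my_send_els,
 *		.hw_seq_free	= my_hw_seq_free,
 *	};
 *
 *	efc->tt = my_efc_ops;
 *
 * libefc then calls back through efc->tt, e.g. efc->tt.new_nport() from
 * __efc_nport_attached() and efc->tt.hw_seq_free() from
 * efc_purge_pending().
 */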

#define EFC_LOG_LIB		0x01
#define EFC_LOG_NODE		0x02
#define EFC_LOG_PORT		0x04
#define EFC_LOG_DOMAIN		0x08
#define EFC_LOG_ELS		0x10
#define EFC_LOG_DOMAIN_SM	0x20
#define EFC_LOG_SM		0x40

/* efc library port structure */
struct efc {
	void			*base;
	struct pci_dev		*pci;
	struct sli4		*sli;
	u32			fcfi;
	u64			req_wwpn;
	u64			req_wwnn;

	u64			def_wwpn;
	u64			def_wwnn;
	u64			max_xfer_size;
	mempool_t		*node_pool;
	struct dma_pool		*node_dma_pool;
	u32			nodes_count;

	u32			link_status;

	struct list_head	vport_list;
	/* lock to protect the vport list */
	spinlock_t		vport_lock;

	struct libefc_function_template tt;
	/* lock to protect the discovery library.
	 * Refer to efclib.c for more details.
	 */
	spinlock_t		lock;

	bool			enable_ini;
	bool			enable_tgt;

	u32			log_level;

	struct efc_domain	*domain;
	void			(*domain_free_cb)(struct efc *efc, void *arg);
	void			*domain_free_cb_arg;

	u64			tgt_rscn_delay_msec;
	u64			tgt_rscn_period_msec;

	bool			external_loopback;
	u32			nodedb_mask;
	u32			logmask;
	mempool_t		*els_io_pool;
	atomic_t		els_io_alloc_failed_count;

	/* hold pending frames */
	bool			hold_frames;
	/* lock to protect pending frames list access */
	spinlock_t		pend_frames_lock;
	struct list_head	pend_frames;
	/* count of pending frames that were processed */
	u32			pend_frames_processed;
};

/*
 * EFC library registration
 * **********************************/
int efcport_init(struct efc *efc);
void efcport_destroy(struct efc *efc);
/*
 * EFC Domain
 * **********************************/
int efc_domain_cb(void *arg, int event, void *data);
void
efc_register_domain_free_cb(struct efc *efc,
			    void (*callback)(struct efc *efc, void *arg),
			    void *arg);

/*
 * EFC nport
 * **********************************/
void efc_nport_cb(void *arg, int event, void *data);
struct efc_vport *
efc_vport_create_spec(struct efc *efc, u64 wwnn, u64 wwpn, u32 fc_id,
		      bool enable_ini, bool enable_tgt,
		      void *tgt_data, void *ini_data);
int efc_nport_vport_new(struct efc_domain *domain, u64 wwpn,
			u64 wwnn, u32 fc_id, bool ini, bool tgt,
			void *tgt_data, void *ini_data);
int efc_nport_vport_del(struct efc *efc, struct efc_domain *domain,
			u64 wwpn, u64 wwnn);

void efc_vport_del_all(struct efc *efc);

/*
 * EFC Node
 * **********************************/
int efc_remote_node_cb(void *arg, int event, void *data);
void efc_node_fcid_display(u32 fc_id, char *buffer, u32 buf_len);
void efc_node_post_shutdown(struct efc_node *node, void *arg);
u64 efc_node_get_wwpn(struct efc_node *node);

/*
 * EFC FCP/ELS/CT interface
 * **********************************/
void efc_dispatch_frame(struct efc *efc, struct efc_hw_sequence *seq);
void efc_disc_io_complete(struct efc_disc_io *io, u32 len, u32 status,
			  u32 ext_status);

/*
 * EFC SCSI INTERACTION LAYER
 * **********************************/
void efc_scsi_sess_reg_complete(struct efc_node *node, u32 status);
void efc_scsi_del_initiator_complete(struct efc *efc, struct efc_node *node);
void efc_scsi_del_target_complete(struct efc *efc, struct efc_node *node);
void efc_scsi_io_list_empty(struct efc *efc, struct efc_node *node);

#endif /* __EFCLIB_H__ */
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1141,7 +1141,7 @@ struct __packed atto_ioctl_vda_gsv_cmd {
|
|||
|
||||
u8 rsp_len;
|
||||
u8 reserved[7];
|
||||
u8 version_info[1];
|
||||
u8 version_info[];
|
||||
#define ATTO_VDA_VER_UNSUPPORTED 0xFF
|
||||
|
||||
};
|
||||
|
|
|
@@ -1525,7 +1525,7 @@ void esas2r_complete_request_cb(struct esas2r_adapter *a,

 		rq->cmd->result =
 			((esas2r_req_status_to_error(rq->req_stat) << 16)
-			 | (rq->func_rsp.scsi_rsp.scsi_stat & STATUS_MASK));
+			 | rq->func_rsp.scsi_rsp.scsi_stat);

 		if (rq->req_stat == RS_UNDERRUN)
 			scsi_set_resid(rq->cmd,
@@ -922,9 +922,7 @@ static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
 		 * saw originally. Also, report that we are providing
 		 * the sense data.
 		 */
-		cmd->result = ((DRIVER_SENSE << 24) |
-			       (DID_OK << 16) |
-			       (SAM_STAT_CHECK_CONDITION << 0));
+		cmd->result = SAM_STAT_CHECK_CONDITION;

 		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
 		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
@@ -293,7 +293,7 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
     struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
     struct netdev_hw_addr *ha;
     struct net_device *real_dev;
-    u8 flogi_maddr[ETH_ALEN];
+    static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
     const struct net_device_ops *ops;

     fcoe->netdev = netdev;

@@ -336,7 +336,6 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
      * or enter promiscuous mode if not capable of listening
      * for multiple unicast MACs.
      */
-    memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
     dev_uc_add(netdev, flogi_maddr);
     if (fip->spma)
         dev_uc_add(netdev, fip->ctl_src_addr);

@@ -442,7 +441,7 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
 {
     struct net_device *netdev = fcoe->netdev;
     struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
-    u8 flogi_maddr[ETH_ALEN];
+    static const u8 flogi_maddr[ETH_ALEN] = FC_FCOE_FLOGI_MAC;
     const struct net_device_ops *ops;

     /*

@@ -458,7 +457,6 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
     synchronize_net();

     /* Delete secondary MAC addresses */
-    memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
     dev_uc_del(netdev, flogi_maddr);
     if (fip->spma)
         dev_uc_del(netdev, fip->ctl_src_addr);
@@ -202,11 +202,10 @@ static int fdomain_select(struct Scsi_Host *sh, int target)
     return 1;
 }

-static void fdomain_finish_cmd(struct fdomain *fd, int result)
+static void fdomain_finish_cmd(struct fdomain *fd)
 {
     outb(0, fd->base + REG_ICTL);
     fdomain_make_bus_idle(fd);
-    fd->cur_cmd->result = result;
     fd->cur_cmd->scsi_done(fd->cur_cmd);
     fd->cur_cmd = NULL;
 }

@@ -273,7 +272,8 @@ static void fdomain_work(struct work_struct *work)
     if (cmd->SCp.phase & in_arbitration) {
         status = inb(fd->base + REG_ASTAT);
         if (!(status & ASTAT_ARB)) {
-            fdomain_finish_cmd(fd, DID_BUS_BUSY << 16);
+            set_host_byte(cmd, DID_BUS_BUSY);
+            fdomain_finish_cmd(fd);
             goto out;
         }
         cmd->SCp.phase = in_selection;

@@ -290,7 +290,8 @@ static void fdomain_work(struct work_struct *work)
         if (!(status & BSTAT_BSY)) {
             /* Try again, for slow devices */
             if (fdomain_select(cmd->device->host, scmd_id(cmd))) {
-                fdomain_finish_cmd(fd, DID_NO_CONNECT << 16);
+                set_host_byte(cmd, DID_NO_CONNECT);
+                fdomain_finish_cmd(fd);
                 goto out;
             }
             /* Stop arbitration and enable parity */

@@ -333,7 +334,7 @@ static void fdomain_work(struct work_struct *work)
             break;
         case BSTAT_MSG | BSTAT_CMD | BSTAT_IO: /* MESSAGE IN */
             cmd->SCp.Message = inb(fd->base + REG_SCSI_DATA);
-            if (!cmd->SCp.Message)
+            if (cmd->SCp.Message == COMMAND_COMPLETE)
                 ++done;
             break;
         }

@@ -359,9 +360,10 @@ static void fdomain_work(struct work_struct *work)
         fdomain_read_data(cmd);

     if (done) {
-        fdomain_finish_cmd(fd, (cmd->SCp.Status & 0xff) |
-                           ((cmd->SCp.Message & 0xff) << 8) |
-                           (DID_OK << 16));
+        set_status_byte(cmd, cmd->SCp.Status);
+        set_host_byte(cmd, DID_OK);
+        scsi_msg_to_host_byte(cmd, cmd->SCp.Message);
+        fdomain_finish_cmd(fd);
     } else {
         if (cmd->SCp.phase & disconnect) {
             outb(ICTL_FIFO | ICTL_SEL | ICTL_REQ | FIFO_COUNT,

@@ -439,10 +441,10 @@ static int fdomain_abort(struct scsi_cmnd *cmd)

     fdomain_make_bus_idle(fd);
     fd->cur_cmd->SCp.phase |= aborted;
-    fd->cur_cmd->result = DID_ABORT << 16;

     /* Aborts are not done well. . . */
-    fdomain_finish_cmd(fd, DID_ABORT << 16);
+    set_host_byte(fd->cur_cmd, DID_ABORT);
+    fdomain_finish_cmd(fd);
     spin_unlock_irqrestore(sh->host_lock, flags);
     return SUCCESS;
 }
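The esp, esas2r, and fdomain hunks above are all instances of the same core rework this series performs: drivers stop hand-packing cmd->result out of shifted driver/host/message/status bytes (DRIVER_SENSE and the other shifted driver-byte macros are on their way out), and instead set each byte through an accessor. A condensed before/after sketch using the helpers visible in these hunks; scmd stands for any struct scsi_cmnd in a driver's completion path:

        /* before: manual bit-packing of the result word */
        scmd->result = (DID_OK << 16) | (COMMAND_COMPLETE << 8) | SAM_STAT_GOOD;

        /* after: byte-wise accessors; scsi_msg_to_host_byte() translates a
         * SCSI message byte into a host byte rather than storing it raw */
        set_status_byte(scmd, SAM_STAT_GOOD);
        set_host_byte(scmd, DID_OK);
        scsi_msg_to_host_byte(scmd, COMMAND_COMPLETE);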
@@ -8,6 +8,7 @@
 #define _HISI_SAS_H_

 #include <linux/acpi.h>
+#include <linux/async.h>
 #include <linux/blk-mq.h>
 #include <linux/blk-mq-pci.h>
 #include <linux/clk.h>

@@ -37,6 +38,7 @@
 #define HISI_SAS_RESET_BIT        0
 #define HISI_SAS_REJECT_CMD_BIT   1
 #define HISI_SAS_PM_BIT           2
+#define HISI_SAS_HW_FAULT_BIT     3
 #define HISI_SAS_MAX_COMMANDS     (HISI_SAS_QUEUE_SLOTS)
 #define HISI_SAS_RESERVED_IPTT    96
 #define HISI_SAS_UNRESERVED_IPTT \

@@ -90,8 +92,8 @@

 #define HISI_SAS_PROT_MASK (HISI_SAS_DIF_PROT_MASK | HISI_SAS_DIX_PROT_MASK)

-#define HISI_SAS_WAIT_PHYUP_TIMEOUT 20
-#define CLEAR_ITCT_TIMEOUT 20
+#define HISI_SAS_WAIT_PHYUP_TIMEOUT (20 * HZ)
+#define HISI_SAS_CLEAR_ITCT_TIMEOUT (20 * HZ)

 struct hisi_hba;

@@ -185,6 +187,7 @@ struct hisi_sas_phy {
     enum sas_linkrate  minimum_linkrate;
     enum sas_linkrate  maximum_linkrate;
     int enable;
+    int wait_phyup_cnt;
     atomic_t down_cnt;

     /* Trace FIFO */
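Folding the HZ factor into these timeout macros fixes a latent unit mismatch: kernel timers are armed in jiffies, so a bare 20 means 20 ticks, not 20 seconds (with HZ=250 that would be a mere 80 ms). With the macros already expressed in jiffies, the call sites in the later hunks reduce to:

        /* before: every caller had to remember the multiply */
        timer->expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;

        /* after: the macro carries its own units */
        timer->expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT;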
@@ -15,7 +15,7 @@ static int hisi_sas_debug_issue_ssp_tmf(struct domain_device *device,
 static int
 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                              struct domain_device *device,
-                             int abort_flag, int tag);
+                             int abort_flag, int tag, bool rst_to_recover);
 static int hisi_sas_softreset_ata_disk(struct domain_device *device);
 static int hisi_sas_control_phy(struct asd_sas_phy *sas_phy, enum phy_func func,
                                 void *funcdata);

@@ -857,6 +857,7 @@ static void hisi_sas_phyup_work(struct work_struct *work)
     struct asd_sas_phy *sas_phy = &phy->sas_phy;
     int phy_no = sas_phy->id;

+    phy->wait_phyup_cnt = 0;
     if (phy->identify.target_port_protocols == SAS_PROTOCOL_SSP)
         hisi_hba->hw->sl_notify_ssp(hisi_hba, phy_no);
     hisi_sas_bytes_dmaed(hisi_hba, phy_no, GFP_KERNEL);

@@ -899,6 +900,8 @@ static void hisi_sas_wait_phyup_timedout(struct timer_list *t)
     hisi_sas_notify_phy_event(phy, HISI_PHYE_LINK_RESET);
 }

+#define HISI_SAS_WAIT_PHYUP_RETRIES 10
+
 void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
 {
     struct hisi_sas_phy *phy = &hisi_hba->phy[phy_no];

@@ -909,8 +912,16 @@ void hisi_sas_phy_oob_ready(struct hisi_hba *hisi_hba, int phy_no)
         return;

     if (!timer_pending(&phy->timer)) {
-        phy->timer.expires = jiffies + HISI_SAS_WAIT_PHYUP_TIMEOUT * HZ;
-        add_timer(&phy->timer);
+        if (phy->wait_phyup_cnt < HISI_SAS_WAIT_PHYUP_RETRIES) {
+            phy->wait_phyup_cnt++;
+            phy->timer.expires = jiffies +
+                                 HISI_SAS_WAIT_PHYUP_TIMEOUT;
+            add_timer(&phy->timer);
+        } else {
+            dev_warn(dev, "phy%d failed to come up %d times, giving up\n",
+                     phy_no, phy->wait_phyup_cnt);
+            phy->wait_phyup_cnt = 0;
+        }
     }
 }
 EXPORT_SYMBOL_GPL(hisi_sas_phy_oob_ready);
@@ -1063,7 +1074,7 @@ static void hisi_sas_dev_gone(struct domain_device *device)
     down(&hisi_hba->sem);
     if (!test_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags)) {
         hisi_sas_internal_task_abort(hisi_hba, device,
-                                     HISI_SAS_INT_ABT_DEV, 0);
+                                     HISI_SAS_INT_ABT_DEV, 0, true);

         hisi_sas_dereg_device(hisi_hba, device);

@@ -1182,9 +1193,9 @@ static void hisi_sas_tmf_timedout(struct timer_list *t)
     complete(&task->slow_task->completion);
 }

-#define TASK_TIMEOUT 20
-#define TASK_RETRY 3
-#define INTERNAL_ABORT_TIMEOUT 6
+#define TASK_TIMEOUT            (20 * HZ)
+#define TASK_RETRY              3
+#define INTERNAL_ABORT_TIMEOUT  (6 * HZ)
 static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
                                            void *parameter, u32 para_len,
                                            struct hisi_sas_tmf_task *tmf)

@@ -1212,7 +1223,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device,
         task->task_done = hisi_sas_task_done;

         task->slow_task->timer.function = hisi_sas_tmf_timedout;
-        task->slow_task->timer.expires = jiffies + TASK_TIMEOUT * HZ;
+        task->slow_task->timer.expires = jiffies + TASK_TIMEOUT;
         add_timer(&task->slow_task->timer);

         res = hisi_sas_task_exec(task, GFP_KERNEL, 1, tmf);

@@ -1505,7 +1516,8 @@ static void hisi_sas_terminate_stp_reject(struct hisi_hba *hisi_hba)
             continue;

         rc = hisi_sas_internal_task_abort(hisi_hba, device,
-                                          HISI_SAS_INT_ABT_DEV, 0);
+                                          HISI_SAS_INT_ABT_DEV, 0,
+                                          false);
         if (rc < 0)
             dev_err(dev, "STP reject: abort dev failed %d\n", rc);
     }

@@ -1604,6 +1616,7 @@ static int hisi_sas_controller_reset(struct hisi_hba *hisi_hba)
     }

     hisi_sas_controller_reset_done(hisi_hba);
+    clear_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
     dev_info(dev, "controller reset complete\n");

     return 0;

@@ -1660,7 +1673,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
                                                &tmf_task);

         rc2 = hisi_sas_internal_task_abort(hisi_hba, device,
-                                           HISI_SAS_INT_ABT_CMD, tag);
+                                           HISI_SAS_INT_ABT_CMD, tag,
+                                           false);
         if (rc2 < 0) {
             dev_err(dev, "abort task: internal abort (%d)\n", rc2);
             return TMF_RESP_FUNC_FAILED;

@@ -1682,7 +1696,7 @@ static int hisi_sas_abort_task(struct sas_task *task)
         if (task->dev->dev_type == SAS_SATA_DEV) {
             rc = hisi_sas_internal_task_abort(hisi_hba, device,
                                               HISI_SAS_INT_ABT_DEV,
-                                              0);
+                                              0, false);
             if (rc < 0) {
                 dev_err(dev, "abort task: internal abort failed\n");
                 goto out;

@@ -1697,7 +1711,8 @@ static int hisi_sas_abort_task(struct sas_task *task)
         struct hisi_sas_cq *cq = &hisi_hba->cq[slot->dlvry_queue];

         rc = hisi_sas_internal_task_abort(hisi_hba, device,
-                                          HISI_SAS_INT_ABT_CMD, tag);
+                                          HISI_SAS_INT_ABT_CMD, tag,
+                                          false);
         if (((rc < 0) || (rc == TMF_RESP_FUNC_FAILED)) &&
                     task->lldd_task) {
             /*

@@ -1723,7 +1738,7 @@ static int hisi_sas_abort_task_set(struct domain_device *device, u8 *lun)
     int rc;

     rc = hisi_sas_internal_task_abort(hisi_hba, device,
-                                      HISI_SAS_INT_ABT_DEV, 0);
+                                      HISI_SAS_INT_ABT_DEV, 0, false);
     if (rc < 0) {
         dev_err(dev, "abort task set: internal abort rc=%d\n", rc);
         return TMF_RESP_FUNC_FAILED;

@@ -1750,6 +1765,8 @@ static int hisi_sas_clear_aca(struct domain_device *device, u8 *lun)
     return rc;
 }

+#define I_T_NEXUS_RESET_PHYUP_TIMEOUT  (2 * HZ)
+
 static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
 {
     struct sas_phy *local_phy = sas_get_local_phy(device);

@@ -1784,7 +1801,8 @@ static int hisi_sas_debug_I_T_nexus_reset(struct domain_device *device)
             sas_ha->sas_phy[local_phy->number];
         struct hisi_sas_phy *phy =
             container_of(sas_phy, struct hisi_sas_phy, sas_phy);
-        int ret = wait_for_completion_timeout(&phyreset, 2 * HZ);
+        int ret = wait_for_completion_timeout(&phyreset,
+                                              I_T_NEXUS_RESET_PHYUP_TIMEOUT);
         unsigned long flags;

         spin_lock_irqsave(&phy->lock, flags);

@@ -1814,7 +1832,7 @@ static int hisi_sas_I_T_nexus_reset(struct domain_device *device)
     int rc;

     rc = hisi_sas_internal_task_abort(hisi_hba, device,
-                                      HISI_SAS_INT_ABT_DEV, 0);
+                                      HISI_SAS_INT_ABT_DEV, 0, false);
     if (rc < 0) {
         dev_err(dev, "I_T nexus reset: internal abort (%d)\n", rc);
         return TMF_RESP_FUNC_FAILED;

@@ -1844,7 +1862,7 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)

     /* Clear internal IO and then lu reset */
     rc = hisi_sas_internal_task_abort(hisi_hba, device,
-                                      HISI_SAS_INT_ABT_DEV, 0);
+                                      HISI_SAS_INT_ABT_DEV, 0, false);
     if (rc < 0) {
         dev_err(dev, "lu_reset: internal abort failed\n");
         goto out;
@@ -1875,12 +1893,24 @@ static int hisi_sas_lu_reset(struct domain_device *device, u8 *lun)
     return rc;
 }

+static void hisi_sas_async_I_T_nexus_reset(void *data, async_cookie_t cookie)
+{
+    struct domain_device *device = data;
+    struct hisi_hba *hisi_hba = dev_to_hisi_hba(device);
+    int rc;
+
+    rc = hisi_sas_debug_I_T_nexus_reset(device);
+    if (rc != TMF_RESP_FUNC_COMPLETE)
+        dev_info(hisi_hba->dev, "I_T_nexus reset fail for dev:%016llx rc=%d\n",
+                 SAS_ADDR(device->sas_addr), rc);
+}
+
 static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
 {
     struct hisi_hba *hisi_hba = sas_ha->lldd_ha;
     struct device *dev = hisi_hba->dev;
     HISI_SAS_DECLARE_RST_WORK_ON_STACK(r);
-    int rc, i;
+    ASYNC_DOMAIN_EXCLUSIVE(async);
+    int i;

     queue_work(hisi_hba->wq, &r.work);
     wait_for_completion(r.completion);

@@ -1895,12 +1925,11 @@ static int hisi_sas_clear_nexus_ha(struct sas_ha_struct *sas_ha)
             dev_is_expander(device->dev_type))
             continue;

-        rc = hisi_sas_debug_I_T_nexus_reset(device);
-        if (rc != TMF_RESP_FUNC_COMPLETE)
-            dev_info(dev, "clear nexus ha: for device[%d] rc=%d\n",
-                     sas_dev->device_id, rc);
+        async_schedule_domain(hisi_sas_async_I_T_nexus_reset,
+                              device, &async);
     }

+    async_synchronize_full_domain(&async);
     hisi_sas_release_tasks(hisi_hba);

     return TMF_RESP_FUNC_COMPLETE;
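This pair of hunks turns the serial loop of I_T nexus resets into a parallel fan-out/join over the kernel's async infrastructure: one async job per device, scheduled into a private domain, joined before tasks are released. A minimal, self-contained sketch of the pattern; struct my_device and reset_one() are hypothetical stand-ins, while the async_* calls are the ones the hunk uses:

        #include <linux/async.h>
        #include <linux/list.h>

        struct my_device {                      /* hypothetical per-device record */
                struct list_head list;
        };

        static void reset_one(void *data, async_cookie_t cookie)
        {
                /* per-device reset work; instances may run in parallel */
        }

        static void reset_all(struct list_head *devices)
        {
                ASYNC_DOMAIN_EXCLUSIVE(async);  /* private domain: join only our jobs */
                struct my_device *dev;

                list_for_each_entry(dev, devices, list)
                        async_schedule_domain(reset_one, dev, &async);

                async_synchronize_full_domain(&async);  /* wait for every reset_one() */
        }

An exclusive domain matters here: the join waits only for the resets this function queued, not for unrelated async work elsewhere in the kernel.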
@@ -2029,11 +2058,13 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id,
  * @tag: tag of IO to be aborted (only relevant to single
  *       IO mode)
  * @dq: delivery queue for this internal abort command
+ * @rst_to_recover: If rst_to_recover set, queue a controller
+ *                  reset if an internal abort times out.
  */
 static int
 _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                               struct domain_device *device, int abort_flag,
-                              int tag, struct hisi_sas_dq *dq)
+                              int tag, struct hisi_sas_dq *dq, bool rst_to_recover)
 {
     struct sas_task *task;
     struct hisi_sas_device *sas_dev = device->lldd_dev;

@@ -2049,6 +2080,9 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
     if (!hisi_hba->hw->prep_abort)
         return TMF_RESP_FUNC_FAILED;

+    if (test_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags))
+        return -EIO;
+
     task = sas_alloc_slow_task(GFP_KERNEL);
     if (!task)
         return -ENOMEM;

@@ -2057,7 +2091,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
     task->task_proto = device->tproto;
     task->task_done = hisi_sas_task_done;
     task->slow_task->timer.function = hisi_sas_tmf_timedout;
-    task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT * HZ;
+    task->slow_task->timer.expires = jiffies + INTERNAL_ABORT_TIMEOUT;
     add_timer(&task->slow_task->timer);

     res = hisi_sas_internal_abort_task_exec(hisi_hba, sas_dev->device_id,

@@ -2079,6 +2113,8 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
     if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
         struct hisi_sas_slot *slot = task->lldd_task;

+        set_bit(HISI_SAS_HW_FAULT_BIT, &hisi_hba->flags);
+
         if (slot) {
             struct hisi_sas_cq *cq =
                 &hisi_hba->cq[slot->dlvry_queue];

@@ -2089,7 +2125,13 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
             synchronize_irq(cq->irq_no);
             slot->task = NULL;
         }
-        dev_err(dev, "internal task abort: timeout and not done.\n");
+
+        if (rst_to_recover) {
+            dev_err(dev, "internal task abort: timeout and not done. Queuing reset.\n");
+            queue_work(hisi_hba->wq, &hisi_hba->rst_work);
+        } else {
+            dev_err(dev, "internal task abort: timeout and not done.\n");
+        }

         res = -EIO;
         goto exit;

@@ -2122,7 +2164,7 @@ _hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
 static int
 hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
                              struct domain_device *device,
-                             int abort_flag, int tag)
+                             int abort_flag, int tag, bool rst_to_recover)
 {
     struct hisi_sas_slot *slot;
     struct device *dev = hisi_hba->dev;

@@ -2134,7 +2176,8 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
         slot = &hisi_hba->slot_info[tag];
         dq = &hisi_hba->dq[slot->dlvry_queue];
         return _hisi_sas_internal_task_abort(hisi_hba, device,
-                                             abort_flag, tag, dq);
+                                             abort_flag, tag, dq,
+                                             rst_to_recover);
     case HISI_SAS_INT_ABT_DEV:
         for (i = 0; i < hisi_hba->cq_nvecs; i++) {
             struct hisi_sas_cq *cq = &hisi_hba->cq[i];

@@ -2145,7 +2188,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba,
             dq = &hisi_hba->dq[i];
             rc = _hisi_sas_internal_task_abort(hisi_hba, device,
                                                abort_flag, tag,
-                                               dq);
+                                               dq, rst_to_recover);
             if (rc)
                 return rc;
         }
@@ -1152,14 +1152,14 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
         }
         default:
         {
-            ts->stat = SAM_STAT_CHECK_CONDITION;
+            ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
             break;
         }
         }
     }
     break;
     case SAS_PROTOCOL_SMP:
-        ts->stat = SAM_STAT_CHECK_CONDITION;
+        ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
         break;

     case SAS_PROTOCOL_SATA:

@@ -1281,7 +1281,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
         struct scatterlist *sg_resp = &task->smp_task.smp_resp;
         void *to = page_address(sg_page(sg_resp));

-        ts->stat = SAM_STAT_GOOD;
+        ts->stat = SAS_SAM_STAT_GOOD;

         dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
                      DMA_TO_DEVICE);

@@ -1298,7 +1298,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
         break;

     default:
-        ts->stat = SAM_STAT_CHECK_CONDITION;
+        ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
         break;
     }
@@ -1649,7 +1649,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
             if (irq < 0) {
                 dev_err(dev, "irq init: fail map phy interrupt %d\n",
                         idx);
-                return -ENOENT;
+                return irq;
             }

             rc = devm_request_irq(dev, irq, phy_interrupts[j], 0,

@@ -1657,7 +1657,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
             if (rc) {
                 dev_err(dev, "irq init: could not request phy interrupt %d, rc=%d\n",
                         irq, rc);
-                return -ENOENT;
+                return rc;
             }
         }
     }

@@ -1668,7 +1668,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
         if (irq < 0) {
             dev_err(dev, "irq init: could not map cq interrupt %d\n",
                     idx);
-            return -ENOENT;
+            return irq;
         }

         rc = devm_request_irq(dev, irq, cq_interrupt_v1_hw, 0,

@@ -1676,7 +1676,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
         if (rc) {
             dev_err(dev, "irq init: could not request cq interrupt %d, rc=%d\n",
                     irq, rc);
-            return -ENOENT;
+            return rc;
         }
     }

@@ -1686,7 +1686,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
         if (irq < 0) {
             dev_err(dev, "irq init: could not map fatal interrupt %d\n",
                     idx);
-            return -ENOENT;
+            return irq;
         }

         rc = devm_request_irq(dev, irq, fatal_interrupts[i], 0,

@@ -1694,7 +1694,7 @@ static int interrupt_init_v1_hw(struct hisi_hba *hisi_hba)
         if (rc) {
             dev_err(dev, "irq init: could not request fatal interrupt %d, rc=%d\n",
                     irq, rc);
-            return -ENOENT;
+            return rc;
         }
     }
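All six interrupt_init_v1_hw() hunks make the same correction: the function used to flatten every failure into -ENOENT, which hides the real cause and defeats probe deferral. The kernel idiom is to propagate the negative errno unchanged; schematically (the irq-lookup call, handler, and DRV_NAME are illustrative stand-ins for the driver's own symbols):

        irq = platform_get_irq(pdev, idx);
        if (irq < 0)
                return irq;     /* propagate the real error, e.g. -EPROBE_DEFER */

        rc = devm_request_irq(dev, irq, handler, 0, DRV_NAME, hisi_hba);
        if (rc)
                return rc;      /* likewise for request failures */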
@@ -994,7 +994,7 @@ static int clear_itct_v2_hw(struct hisi_hba *hisi_hba,
     reg_val = ITCT_CLR_EN_MSK | (dev_id & ITCT_DEV_MSK);
     hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);
     if (!wait_for_completion_timeout(sas_dev->completion,
-                                     CLEAR_ITCT_TIMEOUT * HZ)) {
+                                     HISI_SAS_CLEAR_ITCT_TIMEOUT)) {
         dev_warn(dev, "failed to clear ITCT\n");
         return -ETIMEDOUT;
     }

@@ -2168,7 +2168,7 @@ static void slot_err_v2_hw(struct hisi_hba *hisi_hba,
         }
         break;
     case SAS_PROTOCOL_SMP:
-        ts->stat = SAM_STAT_CHECK_CONDITION;
+        ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
         break;

     case SAS_PROTOCOL_SATA:

@@ -2427,7 +2427,7 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
         struct scatterlist *sg_resp = &task->smp_task.smp_resp;
         void *to = page_address(sg_page(sg_resp));

-        ts->stat = SAM_STAT_GOOD;
+        ts->stat = SAS_SAM_STAT_GOOD;

         dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
                      DMA_TO_DEVICE);

@@ -2441,12 +2441,12 @@ static void slot_complete_v2_hw(struct hisi_hba *hisi_hba,
     case SAS_PROTOCOL_STP:
     case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
     {
-        ts->stat = SAM_STAT_GOOD;
+        ts->stat = SAS_SAM_STAT_GOOD;
         hisi_sas_sata_done(task, slot);
         break;
     }
     default:
-        ts->stat = SAM_STAT_CHECK_CONDITION;
+        ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
         break;
     }
@@ -843,7 +843,7 @@ static int clear_itct_v3_hw(struct hisi_hba *hisi_hba,
     hisi_sas_write32(hisi_hba, ITCT_CLR, reg_val);

     if (!wait_for_completion_timeout(sas_dev->completion,
-                                     CLEAR_ITCT_TIMEOUT * HZ)) {
+                                     HISI_SAS_CLEAR_ITCT_TIMEOUT)) {
         dev_warn(dev, "failed to clear ITCT\n");
         return -ETIMEDOUT;
     }

@@ -2178,7 +2178,7 @@ slot_err_v3_hw(struct hisi_hba *hisi_hba, struct sas_task *task,
         hisi_sas_sata_done(task, slot);
         break;
     case SAS_PROTOCOL_SMP:
-        ts->stat = SAM_STAT_CHECK_CONDITION;
+        ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
         break;
     default:
         break;

@@ -2285,7 +2285,7 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
         struct scatterlist *sg_resp = &task->smp_task.smp_resp;
         void *to = page_address(sg_page(sg_resp));

-        ts->stat = SAM_STAT_GOOD;
+        ts->stat = SAS_SAM_STAT_GOOD;

         dma_unmap_sg(dev, &task->smp_task.smp_req, 1,
                      DMA_TO_DEVICE);

@@ -2298,11 +2298,11 @@ static void slot_complete_v3_hw(struct hisi_hba *hisi_hba,
     case SAS_PROTOCOL_SATA:
     case SAS_PROTOCOL_STP:
     case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-        ts->stat = SAM_STAT_GOOD;
+        ts->stat = SAS_SAM_STAT_GOOD;
         hisi_sas_sata_done(task, slot);
         break;
     default:
-        ts->stat = SAM_STAT_CHECK_CONDITION;
+        ts->stat = SAS_SAM_STAT_CHECK_CONDITION;
         break;
     }
@@ -220,6 +220,9 @@ int scsi_add_host_with_dma(struct Scsi_Host *shost, struct device *dev,
         goto fail;
     }

+    shost->cmd_per_lun = min_t(short, shost->cmd_per_lun,
+                               shost->can_queue);
+
     error = scsi_init_sense_cache(shost);
     if (error)
         goto fail;

@@ -319,7 +322,7 @@ static void scsi_host_dev_release(struct device *dev)

     scsi_proc_hostdir_rm(shost->hostt);

-    /* Wait for functions invoked through call_rcu(&shost->rcu, ...) */
+    /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */
     rcu_barrier();

     if (shost->tmf_work_q)

@@ -657,10 +660,11 @@ EXPORT_SYMBOL_GPL(scsi_flush_work);
 static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
 {
     struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq);
-    int status = *(int *)data;
+    enum scsi_host_status status = *(enum scsi_host_status *)data;

     scsi_dma_unmap(scmd);
-    scmd->result = status << 16;
+    scmd->result = 0;
+    set_host_byte(scmd, status);
     scmd->scsi_done(scmd);
     return true;
 }

@@ -675,7 +679,8 @@ static bool complete_all_cmds_iter(struct request *rq, void *data, bool rsvd)
  * caller to ensure that concurrent I/O submission and/or
  * completion is stopped when calling this function.
  */
-void scsi_host_complete_all_commands(struct Scsi_Host *shost, int status)
+void scsi_host_complete_all_commands(struct Scsi_Host *shost,
+                                     enum scsi_host_status status)
 {
     blk_mq_tagset_busy_iter(&shost->tag_set, complete_all_cmds_iter,
                             &status);
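Typing the parameter as enum scsi_host_status and routing it through set_host_byte() lets the compiler reject callers that pass a pre-shifted result word. Correct callers are unchanged; for example, a surprise-removal path could still fail back everything in flight with (sketch; shost being the adapter's Scsi_Host):

        /* complete all outstanding commands with a host-byte error */
        scsi_host_complete_all_commands(shost, DID_NO_CONNECT);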
@@ -760,7 +760,7 @@ static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
         goto skip_resid;

     default:
-        scp->result = DRIVER_INVALID << 24 | DID_ABORT << 16;
+        scp->result = DID_ABORT << 16;
         break;
     }
@@ -655,8 +655,10 @@ static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
 **/
 static void ibmvfc_del_tgt(struct ibmvfc_target *tgt)
 {
-    if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT))
+    if (!ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_RPORT)) {
         tgt->job_step = ibmvfc_tgt_implicit_logout_and_del;
+        tgt->init_retries = 0;
+    }
     wake_up(&tgt->vhost->work_wait_q);
 }

@@ -4300,9 +4302,10 @@ static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
     ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
     switch (status) {
     case IBMVFC_MAD_SUCCESS:
-        tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
+        tgt_dbg(tgt, "Move Login succeeded for new scsi_id: %llX\n", tgt->new_scsi_id);
         tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
         tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+        tgt->scsi_id = tgt->new_scsi_id;
         tgt->ids.port_id = tgt->scsi_id;
         memcpy(&tgt->service_parms, &rsp->service_parms,
                sizeof(tgt->service_parms));

@@ -4320,8 +4323,8 @@ static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
             level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);

         tgt_log(tgt, level,
-            "Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
-            tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
+            "Move Login failed: new scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
+            tgt->new_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
             status);
         break;
     }

@@ -4358,8 +4361,8 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
     move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
     move->common.length = cpu_to_be16(sizeof(*move));

-    move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
-    move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
+    move->old_scsi_id = cpu_to_be64(tgt->scsi_id);
+    move->new_scsi_id = cpu_to_be64(tgt->new_scsi_id);
     move->wwpn = cpu_to_be64(tgt->wwpn);
     move->node_name = cpu_to_be64(tgt->ids.node_name);

@@ -4368,7 +4371,7 @@ static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
         kref_put(&tgt->kref, ibmvfc_release_tgt);
     } else
-        tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
+        tgt_dbg(tgt, "Sent Move Login for new scsi_id: %llX\n", tgt->new_scsi_id);
 }

 /**
@@ -4728,20 +4731,25 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
      * and it failed for some reason, such as there being I/O
      * pending to the target. In this case, we will have already
      * deleted the rport from the FC transport so we do a move
-     * login, which works even with I/O pending, as it will cancel
-     * any active commands.
+     * login, which works even with I/O pending, however, if
+     * there is still I/O pending, it will stay outstanding, so
+     * we only do this if fast fail is disabled for the rport,
+     * otherwise we let terminate_rport_io clean up the port
+     * before we login at the new location.
      */
     if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
-        /*
-         * Do a move login here. The old target is no longer
-         * known to the transport layer We don't use the
-         * normal ibmvfc_set_tgt_action to set this, as we
-         * don't normally want to allow this state change.
-         */
-        wtgt->old_scsi_id = wtgt->scsi_id;
-        wtgt->scsi_id = scsi_id;
-        wtgt->action = IBMVFC_TGT_ACTION_INIT;
-        ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
+        if (wtgt->move_login) {
+            /*
+             * Do a move login here. The old target is no longer
+             * known to the transport layer We don't use the
+             * normal ibmvfc_set_tgt_action to set this, as we
+             * don't normally want to allow this state change.
+             */
+            wtgt->new_scsi_id = scsi_id;
+            wtgt->action = IBMVFC_TGT_ACTION_INIT;
+            wtgt->init_retries = 0;
+            ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
+        }
         goto unlock_out;
     } else {
         tgt_err(wtgt, "Unexpected target state: %d, %p\n",
@@ -5332,6 +5340,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
         tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
         ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
         tgt->rport = NULL;
+        tgt->init_retries = 0;
         spin_unlock_irqrestore(vhost->host->host_lock, flags);
         fc_remote_port_delete(rport);
         return;

@@ -5486,7 +5495,20 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
             tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
             rport = tgt->rport;
             tgt->rport = NULL;
+            tgt->init_retries = 0;
             ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+
+            /*
+             * If fast fail is enabled, we wait for it to fire and then clean up
+             * the old port, since we expect the fast fail timer to clean up the
+             * outstanding I/O faster than waiting for normal command timeouts.
+             * However, if fast fail is disabled, any I/O outstanding to the
+             * rport LUNs will stay outstanding indefinitely, since the EH handlers
+             * won't get invoked for I/O's timing out. If this is a NPIV failover
+             * scenario, the better alternative is to use the move login.
+             */
+            if (rport && rport->fast_io_fail_tmo == -1)
+                tgt->move_login = 1;
             spin_unlock_irqrestore(vhost->host->host_lock, flags);
             if (rport)
                 fc_remote_port_delete(rport);
@@ -718,7 +718,7 @@ struct ibmvfc_target {
     struct ibmvfc_host *vhost;
     u64 scsi_id;
     u64 wwpn;
-    u64 old_scsi_id;
+    u64 new_scsi_id;
     struct fc_rport *rport;
     int target_id;
     enum ibmvfc_target_action action;

@@ -726,6 +726,7 @@ struct ibmvfc_target {
     int add_rport;
     int init_retries;
    int logo_rcvd;
+    int move_login;
     u32 cancel_key;
     struct ibmvfc_service_parms service_parms;
     struct ibmvfc_service_parms service_parms_change;