SCSI misc on 20201013
This series consists of the usual driver updates (ufs, qla2xxx, tcmu,
ibmvfc, lpfc, smartpqi, hisi_sas, qedi, qedf, mpt3sas) and minor bug
fixes. There are only three core changes: adding sense codes, cleaning
up noretry and adding an option for limitless retries.

Signed-off-by: James E.J. Bottomley <jejb@linux.ibm.com>

-----BEGIN PGP SIGNATURE-----

iJwEABMIAEQWIQTnYEDbdso9F2cI+arnQslM7pishQUCX4YulyYcamFtZXMuYm90
dG9tbGV5QGhhbnNlbnBhcnRuZXJzaGlwLmNvbQAKCRDnQslM7pishaZDAQCT7rwG
UEZYHgYkU9EX9ERVBQM0SW4mLrxf3g3P5ioJsAEAtkclCM4QsIOP+MIPjIa0EyUY
khu0kcrmeFR2YwA8zhw=
=4w4S
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "The usual driver updates (ufs, qla2xxx, tcmu, ibmvfc, lpfc, smartpqi,
  hisi_sas, qedi, qedf, mpt3sas) and minor bug fixes.

  There are only three core changes: adding sense codes, cleaning up
  noretry and adding an option for limitless retries"

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (226 commits)
  scsi: hisi_sas: Recover PHY state according to the status before reset
  scsi: hisi_sas: Filter out new PHY up events during suspend
  scsi: hisi_sas: Add device link between SCSI devices and hisi_hba
  scsi: hisi_sas: Add check for methods _PS0 and _PR0
  scsi: hisi_sas: Add controller runtime PM support for v3 hw
  scsi: hisi_sas: Switch to new framework to support suspend and resume
  scsi: hisi_sas: Use hisi_hba->cq_nvecs for calling calling synchronize_irq()
  scsi: qedf: Remove redundant assignment to variable 'rc'
  scsi: lpfc: Remove unneeded variable 'status' in lpfc_fcp_cpu_map_store()
  scsi: snic: Convert to use DEFINE_SEQ_ATTRIBUTE macro
  scsi: qla4xxx: Delete unneeded variable 'status' in qla4xxx_process_ddb_changed
  scsi: sun_esp: Use module_platform_driver to simplify the code
  scsi: sun3x_esp: Use module_platform_driver to simplify the code
  scsi: sni_53c710: Use module_platform_driver to simplify the code
  scsi: qlogicpti: Use module_platform_driver to simplify the code
  scsi: mac_esp: Use module_platform_driver to simplify the code
  scsi: jazz_esp: Use module_platform_driver to simplify the code
  scsi: mvumi: Fix error return in mvumi_io_attach()
  scsi: lpfc: Drop nodelist reference on error in lpfc_gen_req()
  scsi: be2iscsi: Fix a theoretical leak in beiscsi_create_eqs()
  ...
commit 55e0500eb5
@@ -9,7 +9,9 @@ contain a phandle reference to UFS M-PHY node.
 Required properties for UFS nodes:
 - compatible : Compatible list, contains the following controller:
 	"mediatek,mt8183-ufshci" for MediaTek UFS host controller
-		present on MT81xx chipsets.
+		present on MT8183 chipsets.
+	"mediatek,mt8192-ufshci" for MediaTek UFS host controller
+		present on MT8192 chipsets.
 - reg : Address and length of the UFS register set.
 - phys : phandle to m-phy.
 - clocks : List of phandle and clock specifier pairs.
@@ -271,12 +271,6 @@ Conventions
 First, Linus Torvalds's thoughts on C coding style can be found in the
 Documentation/process/coding-style.rst file.
 
-Next, there is a movement to "outlaw" typedefs introducing synonyms for
-struct tags.  Both can be still found in the SCSI subsystem, but
-the typedefs have been moved to a single file, scsi_typedefs.h to
-make their future removal easier, for example:
-"typedef struct scsi_cmnd Scsi_Cmnd;"
-
 Also, most C99 enhancements are encouraged to the extent they are supported
 by the relevant gcc compilers.  So C99 style structure and array
 initializers are encouraged where appropriate.  Don't go too far,
@@ -1,12 +1,12 @@
 .. SPDX-License-Identifier: GPL-2.0
 
-=====================================
-SMARTPQI - Microsemi Smart PQI Driver
-=====================================
+==============================================
+SMARTPQI - Microchip Smart Storage SCSI driver
+==============================================
 
-This file describes the smartpqi SCSI driver for Microsemi
-(http://www.microsemi.com) PQI controllers. The smartpqi driver
-is the next generation SCSI driver for Microsemi Corp. The smartpqi
+This file describes the smartpqi SCSI driver for Microchip
+(http://www.microchip.com) PQI controllers. The smartpqi driver
+is the next generation SCSI driver for Microchip Corp. The smartpqi
 driver is the first SCSI driver to implement the PQI queuing model.
 
 The smartpqi driver will replace the aacraid driver for Adaptec Series 9
@@ -14,7 +14,7 @@ controllers. Customers running an older kernel (Pre-4.9) using an Adaptec
 Series 9 controller will have to configure the smartpqi driver or their
 volumes will not be added to the OS.
 
-For Microsemi smartpqi controller support, enable the smartpqi driver
+For Microchip smartpqi controller support, enable the smartpqi driver
 when configuring the kernel.
 
 For more information on the PQI Queuing Interface, please see:
MAINTAINERS (28 changed lines)
@@ -3520,13 +3520,17 @@ F: drivers/net/ethernet/broadcom/bnx2.*
 F: drivers/net/ethernet/broadcom/bnx2_*
 
 BROADCOM BNX2FC 10 GIGABIT FCOE DRIVER
-M: QLogic-Storage-Upstream@qlogic.com
+M: Saurav Kashyap <skashyap@marvell.com>
+M: Javed Hasan <jhasan@marvell.com>
+M: GR-QLogic-Storage-Upstream@marvell.com
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: drivers/scsi/bnx2fc/
 
 BROADCOM BNX2I 1/10 GIGABIT iSCSI DRIVER
-M: QLogic-Storage-Upstream@qlogic.com
+M: Nilesh Javali <njavali@marvell.com>
+M: Manish Rangankar <mrangankar@marvell.com>
+M: GR-QLogic-Storage-Upstream@marvell.com
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: drivers/scsi/bnx2i/
@@ -7747,8 +7751,8 @@ F: Documentation/watchdog/hpwdt.rst
 F: drivers/watchdog/hpwdt.c
 
 HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
-M: Don Brace <don.brace@microsemi.com>
-L: esc.storagedev@microsemi.com
+M: Don Brace <don.brace@microchip.com>
+L: storagedev@microchip.com
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: Documentation/scsi/hpsa.rst
@@ -11546,8 +11550,8 @@ F: arch/mips/configs/generic/board-ocelot.config
 F: arch/mips/generic/board-ocelot.c
 
 MICROSEMI SMART ARRAY SMARTPQI DRIVER (smartpqi)
-M: Don Brace <don.brace@microsemi.com>
-L: esc.storagedev@microsemi.com
+M: Don Brace <don.brace@microchip.com>
+L: storagedev@microchip.com
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: Documentation/scsi/smartpqi.rst
@@ -14208,13 +14212,17 @@ S: Supported
 F: drivers/infiniband/hw/qib/
 
 QLOGIC QL41xxx FCOE DRIVER
-M: QLogic-Storage-Upstream@cavium.com
+M: Saurav Kashyap <skashyap@marvell.com>
+M: Javed Hasan <jhasan@marvell.com>
+M: GR-QLogic-Storage-Upstream@marvell.com
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: drivers/scsi/qedf/
 
 QLOGIC QL41xxx ISCSI DRIVER
-M: QLogic-Storage-Upstream@cavium.com
+M: Nilesh Javali <njavali@marvell.com>
+M: Manish Rangankar <mrangankar@marvell.com>
+M: GR-QLogic-Storage-Upstream@marvell.com
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: drivers/scsi/qedi/
@@ -14258,7 +14266,9 @@ F: Documentation/networking/device_drivers/qlogic/LICENSE.qla3xxx
 F: drivers/net/ethernet/qlogic/qla3xxx.*
 
 QLOGIC QLA4XXX iSCSI DRIVER
-M: QLogic-Storage-Upstream@qlogic.com
+M: Nilesh Javali <njavali@marvell.com>
+M: Manish Rangankar <mrangankar@marvell.com>
+M: GR-QLogic-Storage-Upstream@marvell.com
 L: linux-scsi@vger.kernel.org
 S: Supported
 F: Documentation/scsi/LICENSE.qla4xxx
@@ -2593,7 +2593,7 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
 	/* Get the data transfer speeds
 	 */
 	data_sz = ioc->spi_data.sdp0length * 4;
-	pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+	pg0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
 	if (pg0_alloc) {
 		hdr.PageVersion = ioc->spi_data.sdp0version;
 		hdr.PageLength = data_sz;
@@ -2657,8 +2657,7 @@ mptctl_hp_targetinfo(MPT_ADAPTER *ioc, unsigned long arg)
 	/* Issue the second config page request */
 	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 	data_sz = (int) cfg.cfghdr.hdr->PageLength * 4;
-	pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
-					ioc->pcidev, data_sz, &page_dma);
+	pg3_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
 	if (pg3_alloc) {
 		cfg.physAddr = page_dma;
 		cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
@@ -763,7 +763,7 @@ mptfc_GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
 
 	data_sz = hdr.PageLength * 4;
 	rc = -ENOMEM;
-	ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+	ppage0_alloc = pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
 	if (ppage0_alloc) {
 
  try_again:
@@ -904,7 +904,7 @@ mptfc_GetFcPortPage1(MPT_ADAPTER *ioc, int portnum)
 	if (data_sz < sizeof(FCPortPage1_t))
 		data_sz = sizeof(FCPortPage1_t);
 
-	page1_alloc = (FCPortPage1_t *) pci_alloc_consistent(ioc->pcidev,
+	page1_alloc = pci_alloc_consistent(ioc->pcidev,
 					data_sz,
 					&page1_dma);
 	if (!page1_alloc)
@@ -922,8 +922,6 @@ mptfc_GetFcPortPage1(MPT_ADAPTER *ioc, int portnum)
 		}
 	}
 
-	memset(page1_alloc,0,data_sz);
-
 	cfg.physAddr = page1_dma;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 
@@ -1516,7 +1516,6 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, u64 lun,
 	int	 ii;
 	int	 retval;
 	MPT_ADAPTER *ioc = hd->ioc;
-	unsigned long	 timeleft;
 	u8	 issue_hard_reset;
 	u32	 ioc_raw_state;
 	unsigned long	 time_count;
@@ -1614,7 +1613,7 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, u64 lun,
 		}
 	}
 
-	timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+	wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
 		timeout*HZ);
 	if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
 		retval = FAILED;
@@ -1607,7 +1607,6 @@ static enum zfcp_erp_act_result zfcp_erp_strategy(
 static int zfcp_erp_thread(void *data)
 {
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
-	struct list_head *next;
 	struct zfcp_erp_action *act;
 	unsigned long flags;
 
@@ -1620,12 +1619,11 @@ static int zfcp_erp_thread(void *data)
 			break;
 
 		write_lock_irqsave(&adapter->erp_lock, flags);
-		next = adapter->erp_ready_head.next;
+		act = list_first_entry_or_null(&adapter->erp_ready_head,
+					       struct zfcp_erp_action, list);
 		write_unlock_irqrestore(&adapter->erp_lock, flags);
 
-		if (next != &adapter->erp_ready_head) {
-			act = list_entry(next, struct zfcp_erp_action, list);
-
+		if (act) {
 			/* there is more to come after dismission, no notify */
 			if (zfcp_erp_strategy(act) != ZFCP_ERP_DISMISSED)
 				zfcp_erp_wakeup(adapter);
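The conversion above folds an open-coded "take head->next, compare it against the head, then list_entry()" sequence into a single list_first_entry_or_null() call, which yields either a usable entry pointer or NULL, never a pointer to the list head itself. A minimal, self-contained sketch of the same idiom, using a hypothetical work-item type rather than the zfcp structures:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct work_item {
		struct list_head list;
		int payload;
	};

	static LIST_HEAD(ready_head);
	static DEFINE_SPINLOCK(ready_lock);

	/* Pop the first queued item, or return NULL if the list is empty. */
	static struct work_item *pop_first(void)
	{
		struct work_item *item;
		unsigned long flags;

		spin_lock_irqsave(&ready_lock, flags);
		/* Replaces the next/head comparison plus list_entry() in one call. */
		item = list_first_entry_or_null(&ready_head, struct work_item, list);
		if (item)
			list_del(&item->list);
		spin_unlock_irqrestore(&ready_lock, flags);
		return item;
	}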
@@ -426,9 +426,14 @@ static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req)
  * or it has been dismissed due to a queue shutdown, this function
  * is called to process the completion status and trigger further
  * events related to the FSF request.
+ * Caller must ensure that the request has been removed from
+ * adapter->req_list, to protect against concurrent modification
+ * by zfcp_erp_strategy_check_fsfreq().
  */
 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
 {
+	struct zfcp_erp_action *erp_action;
+
 	if (unlikely(zfcp_fsf_req_is_status_read_buffer(req))) {
 		zfcp_fsf_status_read_handler(req);
 		return;
@@ -439,8 +444,9 @@ static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
 	zfcp_fsf_fsfstatus_eval(req);
 	req->handler(req);
 
-	if (req->erp_action)
-		zfcp_erp_notify(req->erp_action, 0);
+	erp_action = req->erp_action;
+	if (erp_action)
+		zfcp_erp_notify(erp_action, 0);
 
 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
 		zfcp_fsf_req_free(req);
@@ -242,7 +242,7 @@ int aac_commit = -1;
 int startup_timeout = 180;
 int aif_timeout = 120;
 int aac_sync_mode;		/* Only Sync. transfer - disabled */
-int aac_convert_sgl = 1;	/* convert non-conformable s/g list - enabled */
+static int aac_convert_sgl = 1;	/* convert non-conformable s/g list - enabled */
 
 module_param(aac_sync_mode, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(aac_sync_mode, "Force sync. transfer mode"
@@ -290,7 +290,7 @@ MODULE_PARM_DESC(numacb, "Request a limit to the number of adapter control"
 	" blocks (FIB) allocated. Valid values are 512 and down. Default is"
 	" to use suggestion from Firmware.");
 
-int acbsize = -1;
+static int acbsize = -1;
 module_param(acbsize, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(acbsize, "Request a specific adapter control block (FIB)"
 	" size. Valid values are 512, 2048, 4096 and 8192. Default is to use"
@@ -321,7 +321,7 @@ int aac_reset_devices;
 module_param_named(reset_devices, aac_reset_devices, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(reset_devices, "Force an adapter reset at initialization.");
 
-int aac_wwn = 1;
+static int aac_wwn = 1;
 module_param_named(wwn, aac_wwn, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(wwn, "Select a WWN type for the arrays:\n"
 	"\t0 - Disable\n"
@@ -2229,10 +2229,10 @@ int aac_get_adapter_info(struct aac_dev* dev)
 	}
 
 	if (dev->dac_support) {
-		if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(64))) {
+		if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(64))) {
 			if (!dev->in_reset)
 				dev_info(&dev->pdev->dev, "64 Bit DAC enabled\n");
-		} else if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(32))) {
+		} else if (!dma_set_mask(&dev->pdev->dev, DMA_BIT_MASK(32))) {
 			dev_info(&dev->pdev->dev, "DMA mask set failed, 64 Bit DAC disabled\n");
 			dev->dac_support = 0;
 		} else {
@@ -3253,7 +3253,6 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
 	case START_STOP:
 		return aac_start_stop(scsicmd);
 
-	fallthrough;
 	default:
 	/*
 	 * Unhandled commands
@@ -670,8 +670,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 					goto cleanup;
 				}
 			}
-			addr = pci_map_single(dev->pdev, p, sg_count[i],
-						data_dir);
+			addr = dma_map_single(&dev->pdev->dev, p, sg_count[i],
+					      data_dir);
 			hbacmd->sge[i].addr_hi = cpu_to_le32((u32)(addr>>32));
 			hbacmd->sge[i].addr_lo = cpu_to_le32(
 						(u32)(addr & 0xffffffff));
@@ -732,8 +732,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 					goto cleanup;
 				}
 			}
-			addr = pci_map_single(dev->pdev, p,
-						sg_count[i], data_dir);
+			addr = dma_map_single(&dev->pdev->dev, p,
+					      sg_count[i], data_dir);
 
 			psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
 			psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
@@ -788,8 +788,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 					goto cleanup;
 				}
 			}
-			addr = pci_map_single(dev->pdev, p,
-						sg_count[i], data_dir);
+			addr = dma_map_single(&dev->pdev->dev, p,
+					      sg_count[i], data_dir);
 
 			psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
 			psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
@@ -844,7 +844,9 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 					goto cleanup;
 				}
 			}
-			addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
+			addr = dma_map_single(&dev->pdev->dev, p,
+					      usg->sg[i].count,
+					      data_dir);
 
 			psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
 			byte_count += usg->sg[i].count;
@@ -883,8 +885,8 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
 					goto cleanup;
 				}
 			}
-			addr = pci_map_single(dev->pdev, p,
-				sg_count[i], data_dir);
+			addr = dma_map_single(&dev->pdev->dev, p,
+					      sg_count[i], data_dir);
 
 			psg->sg[i].addr = cpu_to_le32(addr);
 			byte_count += sg_count[i];
@@ -1551,6 +1551,7 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 	aac_fib_map_free(aac);
 	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
 			  aac->comm_phys);
+	aac_adapter_ioremap(aac, 0);
 	aac->comm_addr = NULL;
 	aac->comm_phys = 0;
 	kfree(aac->queues);
@@ -1561,15 +1562,15 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
 	dmamask = DMA_BIT_MASK(32);
 	quirks = aac_get_driver_ident(index)->quirks;
 	if (quirks & AAC_QUIRK_31BIT)
-		retval = pci_set_dma_mask(aac->pdev, dmamask);
+		retval = dma_set_mask(&aac->pdev->dev, dmamask);
 	else if (!(quirks & AAC_QUIRK_SRC))
-		retval = pci_set_dma_mask(aac->pdev, dmamask);
+		retval = dma_set_mask(&aac->pdev->dev, dmamask);
 	else
-		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+		retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
 
 	if (quirks & AAC_QUIRK_31BIT && !retval) {
 		dmamask = DMA_BIT_MASK(31);
-		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
+		retval = dma_set_coherent_mask(&aac->pdev->dev, dmamask);
 	}
 
 	if (retval)
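The aacraid hunks above, like the mpt and aic7xxx ones elsewhere in this series, mechanically convert the legacy PCI DMA wrappers to the generic DMA API: pci_set_dma_mask() becomes dma_set_mask(), pci_set_consistent_dma_mask() becomes dma_set_coherent_mask(), pci_map_single() becomes dma_map_single(), and the device argument changes from the pci_dev to &pdev->dev. A minimal sketch of the modern idiom in a hypothetical probe path (not code from this series):

	#include <linux/pci.h>
	#include <linux/dma-mapping.h>

	/* Prefer a 64-bit DMA mask, fall back to 32-bit. */
	static int example_setup_dma(struct pci_dev *pdev)
	{
		/* Sets the streaming and coherent masks together. */
		if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)))
			return 0;
		return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	}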
@@ -1659,7 +1659,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out;
 
 	if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
-		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		error = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 		if (error) {
 			dev_err(&pdev->dev, "PCI 32 BIT dma mask set failed");
 			goto out_disable_pdev;
@@ -1678,7 +1678,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		mask_bits = 32;
 	}
 
-	error = pci_set_consistent_dma_mask(pdev, dmamask);
+	error = dma_set_coherent_mask(&pdev->dev, dmamask);
 	if (error) {
 		dev_err(&pdev->dev, "PCI %d B consistent dma mask set failed\n"
 				, mask_bits);
@@ -9402,10 +9402,9 @@ ahd_loadseq(struct ahd_softc *ahd)
 	if (cs_count != 0) {
 
 		cs_count *= sizeof(struct cs);
-		ahd->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+		ahd->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC);
 		if (ahd->critical_sections == NULL)
 			panic("ahd_loadseq: Could not malloc");
-		memcpy(ahd->critical_sections, cs_table, cs_count);
 	}
 	ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);
 
@@ -952,8 +952,8 @@ int
 ahd_dmamem_alloc(struct ahd_softc *ahd, bus_dma_tag_t dmat, void** vaddr,
 		 int flags, bus_dmamap_t *mapp)
 {
-	*vaddr = pci_alloc_consistent(ahd->dev_softc,
-				      dmat->maxsize, mapp);
+	*vaddr = dma_alloc_coherent(&ahd->dev_softc->dev, dmat->maxsize, mapp,
+				    GFP_ATOMIC);
 	if (*vaddr == NULL)
 		return (ENOMEM);
 	return(0);
@@ -963,8 +963,7 @@ void
 ahd_dmamem_free(struct ahd_softc *ahd, bus_dma_tag_t dmat,
 		void* vaddr, bus_dmamap_t map)
 {
-	pci_free_consistent(ahd->dev_softc, dmat->maxsize,
-			    vaddr, map);
+	dma_free_coherent(&ahd->dev_softc->dev, dmat->maxsize, vaddr, map);
 }
 
 int
@@ -6879,10 +6879,9 @@ ahc_loadseq(struct ahc_softc *ahc)
 	if (cs_count != 0) {
 
 		cs_count *= sizeof(struct cs);
-		ahc->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
+		ahc->critical_sections = kmemdup(cs_table, cs_count, GFP_ATOMIC);
 		if (ahc->critical_sections == NULL)
 			panic("ahc_loadseq: Could not malloc");
-		memcpy(ahc->critical_sections, cs_table, cs_count);
 	}
 	ahc_outb(ahc, SEQCTL, PERRORDIS|FAILDIS|FASTMODE);
 
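kmalloc() immediately followed by memcpy() of the same length is exactly the semantics of kmemdup(), which is what both loadseq hunks apply to the sequencer critical-section table. A tiny sketch of the equivalence (hypothetical helper, not from the driver):

	#include <linux/slab.h>

	static void *dup_table(const void *table, size_t len)
	{
		/* Before: p = kmalloc(len, GFP_ATOMIC); memcpy(p, table, len); */
		return kmemdup(table, len, GFP_ATOMIC);
	}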
@@ -730,7 +730,7 @@ ahc_linux_abort(struct scsi_cmnd *cmd)
 	int error;
 
 	error = ahc_linux_queue_recovery_cmd(cmd, SCB_ABORT);
-	if (error != 0)
+	if (error != SUCCESS)
 		printk("aic7xxx_abort returns 0x%x\n", error);
 	return (error);
 }
@@ -744,7 +744,7 @@ ahc_linux_dev_reset(struct scsi_cmnd *cmd)
 	int error;
 
 	error = ahc_linux_queue_recovery_cmd(cmd, SCB_DEVICE_RESET);
-	if (error != 0)
+	if (error != SUCCESS)
 		printk("aic7xxx_dev_reset returns 0x%x\n", error);
 	return (error);
 }
@@ -42,14 +42,6 @@
 extern struct kmem_cache *asd_dma_token_cache;
 extern struct kmem_cache *asd_ascb_cache;
 
-static inline void asd_stringify_sas_addr(char *p, const u8 *sas_addr)
-{
-	int i;
-	for (i = 0; i < SAS_ADDR_SIZE; i++, p += 2)
-		snprintf(p, 3, "%02X", sas_addr[i]);
-	*p = '\0';
-}
-
 struct asd_ha_struct;
 struct asd_ascb;
 
@@ -166,14 +166,15 @@ cumanascsi_2_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
 
 		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
 
-		if (direction == DMA_OUT)
-			map_dir = DMA_TO_DEVICE,
-			dma_dir = DMA_MODE_WRITE,
+		if (direction == DMA_OUT) {
+			map_dir = DMA_TO_DEVICE;
+			dma_dir = DMA_MODE_WRITE;
 			alatch_dir = ALATCH_DMA_OUT;
-		else
-			map_dir = DMA_FROM_DEVICE,
-			dma_dir = DMA_MODE_READ,
+		} else {
+			map_dir = DMA_FROM_DEVICE;
+			dma_dir = DMA_MODE_READ;
 			alatch_dir = ALATCH_DMA_IN;
+		}
 
 		dma_map_sg(dev, info->sg, bufs, map_dir);
 
@@ -326,10 +327,12 @@ cumanascsi_2_set_proc_info(struct Scsi_Host *host, char *buffer, int length)
 				cumanascsi_2_terminator_ctl(host, 0);
 			else
 				ret = -EINVAL;
-		} else
+		} else {
 			ret = -EINVAL;
-	} else
+		}
+	} else {
 		ret = -EINVAL;
+	}
 
 	return ret;
 }
@@ -165,12 +165,13 @@ eesoxscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
 
 		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
 
-		if (direction == DMA_OUT)
-			map_dir = DMA_TO_DEVICE,
+		if (direction == DMA_OUT) {
+			map_dir = DMA_TO_DEVICE;
 			dma_dir = DMA_MODE_WRITE;
-		else
-			map_dir = DMA_FROM_DEVICE,
+		} else {
+			map_dir = DMA_FROM_DEVICE;
 			dma_dir = DMA_MODE_READ;
+		}
 
 		dma_map_sg(dev, info->sg, bufs, map_dir);
 
@@ -120,7 +120,7 @@ static struct scsi_host_template oakscsi_template = {
 static int oakscsi_probe(struct expansion_card *ec, const struct ecard_id *id)
 {
 	struct Scsi_Host *host;
-	int ret = -ENOMEM;
+	int ret;
 
 	ret = ecard_request_resources(ec);
 	if (ret)
@@ -138,12 +138,13 @@ powertecscsi_dma_setup(struct Scsi_Host *host, struct scsi_pointer *SCp,
 
 		bufs = copy_SCp_to_sg(&info->sg[0], SCp, NR_SG);
 
-		if (direction == DMA_OUT)
-			map_dir = DMA_TO_DEVICE,
+		if (direction == DMA_OUT) {
+			map_dir = DMA_TO_DEVICE;
 			dma_dir = DMA_MODE_WRITE;
-		else
-			map_dir = DMA_FROM_DEVICE,
+		} else {
+			map_dir = DMA_FROM_DEVICE;
 			dma_dir = DMA_MODE_READ;
+		}
 
 		dma_map_sg(dev, info->sg, bufs, map_dir);
 
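The three arm/ hunks above fix the same latent trap: an unbraced if/else whose body chains assignments with the comma operator. Only the comma-joined expression is conditional, so appending one more plain statement would silently execute on every path. A standalone sketch of the hazard and the braced fix (hypothetical flags, not driver code):

	/* Demonstrates why the braced form is safer. */
	static void set_direction_flags(int out)
	{
		int map_dir, dma_dir, latch_dir;

		/* Fragile: the comma operator makes this one conditional
		 * expression; a statement added after "dma_dir = 1," that
		 * forgets the comma would escape the if entirely. */
		if (out)
			map_dir = 1,
			dma_dir = 1;
		else
			map_dir = 0,
			dma_dir = 0;

		/* Robust: braces scope every statement explicitly. */
		if (out) {
			map_dir = 1;
			dma_dir = 1;
			latch_dir = 1;
		} else {
			map_dir = 0;
			dma_dir = 0;
			latch_dir = 0;
		}
		(void)map_dir; (void)dma_dir; (void)latch_dir;
	}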
@@ -3020,6 +3020,7 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 			goto create_eq_error;
 		}
 
+		mem->dma = paddr;
 		mem->va = eq_vaddress;
 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
 				    sizeof(struct be_eq_entry), eq_vaddress);
@@ -3029,7 +3030,6 @@ static int beiscsi_create_eqs(struct beiscsi_hba *phba,
 			goto create_eq_error;
 		}
 
-		mem->dma = paddr;
 		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
 					    BEISCSI_EQ_DELAY_DEF);
 		if (ret) {
@@ -3086,6 +3086,7 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
 			goto create_cq_error;
 		}
 
+		mem->dma = paddr;
 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
 				    sizeof(struct sol_cqe), cq_vaddress);
 		if (ret) {
@@ -3095,7 +3096,6 @@ static int beiscsi_create_cqs(struct beiscsi_hba *phba,
 			goto create_cq_error;
 		}
 
-		mem->dma = paddr;
 		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
 					    false, 0);
 		if (ret) {
@@ -50,7 +50,7 @@ struct workqueue_struct *bnx2fc_wq;
  * Here the io threads are per cpu but the l2 thread is just one
  */
 struct fcoe_percpu_s bnx2fc_global;
-DEFINE_SPINLOCK(bnx2fc_global_lock);
+static DEFINE_SPINLOCK(bnx2fc_global_lock);
 
 static struct cnic_ulp_ops bnx2fc_cnic_cb;
 static struct libfc_function_template bnx2fc_libfc_fcn_templ;
@@ -108,22 +108,22 @@ MODULE_PARM_DESC(debug_logging,
 	"\t\t0x10 - fcoe L2 fame related logs.\n"
 	"\t\t0xff - LOG all messages.");
 
-uint bnx2fc_devloss_tmo;
+static uint bnx2fc_devloss_tmo;
 module_param_named(devloss_tmo, bnx2fc_devloss_tmo, uint, S_IRUGO);
 MODULE_PARM_DESC(devloss_tmo, " Change devloss_tmo for the remote ports "
 	"attached via bnx2fc.");
 
-uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
+static uint bnx2fc_max_luns = BNX2FC_MAX_LUN;
 module_param_named(max_luns, bnx2fc_max_luns, uint, S_IRUGO);
 MODULE_PARM_DESC(max_luns, " Change the default max_lun per SCSI host. Default "
 	"0xffff.");
 
-uint bnx2fc_queue_depth;
+static uint bnx2fc_queue_depth;
 module_param_named(queue_depth, bnx2fc_queue_depth, uint, S_IRUGO);
 MODULE_PARM_DESC(queue_depth, " Change the default queue depth of SCSI devices "
 	"attached via bnx2fc.");
 
-uint bnx2fc_log_fka;
+static uint bnx2fc_log_fka;
 module_param_named(log_fka, bnx2fc_log_fka, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(log_fka, " Print message to kernel log when fcoe is "
 	"initiating a FIP keep alive when debug logging is enabled.");
@@ -864,7 +864,7 @@ int bnx2fc_initiate_abts(struct bnx2fc_cmd *io_req)
 
 	abts_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_ABTS);
 	if (!abts_io_req) {
-		printk(KERN_ERR PFX "abts: couldnt allocate cmd\n");
+		printk(KERN_ERR PFX "abts: couldn't allocate cmd\n");
 		rc = FAILED;
 		goto abts_err;
 	}
@@ -957,7 +957,7 @@ int bnx2fc_initiate_seq_cleanup(struct bnx2fc_cmd *orig_io_req, u32 offset,
 
 	seq_clnp_req = bnx2fc_elstm_alloc(tgt, BNX2FC_SEQ_CLEANUP);
 	if (!seq_clnp_req) {
-		printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
 		rc = -ENOMEM;
 		kfree(cb_arg);
 		goto cleanup_err;
@@ -1015,7 +1015,7 @@ int bnx2fc_initiate_cleanup(struct bnx2fc_cmd *io_req)
 
 	cleanup_io_req = bnx2fc_elstm_alloc(tgt, BNX2FC_CLEANUP);
 	if (!cleanup_io_req) {
-		printk(KERN_ERR PFX "cleanup: couldnt allocate cmd\n");
+		printk(KERN_ERR PFX "cleanup: couldn't allocate cmd\n");
 		rc = -1;
 		goto cleanup_err;
 	}
@@ -474,8 +474,6 @@ static int __init bnx2i_mod_init(void)
 	if (sq_size && !is_power_of_2(sq_size))
 		sq_size = roundup_pow_of_two(sq_size);
 
-	mutex_init(&bnx2i_dev_lock);
-
 	bnx2i_scsi_xport_template =
 			iscsi_register_transport(&bnx2i_iscsi_transport);
 	if (!bnx2i_scsi_xport_template) {
@@ -2384,7 +2384,7 @@ static int csio_hw_prep_fw(struct csio_hw *hw, struct fw_info *fw_info,
 			FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
 			FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
 			FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
-		ret = EINVAL;
+		ret = -EINVAL;
 		goto bye;
 	}
 
@@ -933,14 +933,14 @@ csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
 		 *    abort for that I/O by the FW crossed each other.
 		 *    The FW returned FW_EINVAL. The original I/O would have
 		 *    returned with FW_SUCCESS or any other SCSI error.
-		 * 3. The FW couldnt sent the abort out on the wire, as there
+		 * 3. The FW couldn't sent the abort out on the wire, as there
 		 *    was an I-T nexus loss (link down, remote device logged
 		 *    out etc). FW sent back an appropriate IT nexus loss status
 		 *    for the abort.
 		 * 4. FW sent an abort, but abort timed out (remote device
 		 *    didnt respond). FW replied back with
 		 *    FW_SCSI_ABORT_TIMEDOUT.
-		 * 5. FW couldnt genuinely abort the request for some reason,
+		 * 5. FW couldn't genuinely abort the request for some reason,
 		 *    and sent us an error.
 		 *
 		 * The first 3 scenarios are treated as succesful abort
@@ -1859,7 +1859,7 @@ csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd)
 	spin_unlock_irqrestore(&hw->lock, flags);
 
 	if (retval != 0) {
-		csio_err(hw, "ioreq: %p couldnt be started, status:%d\n",
+		csio_err(hw, "ioreq: %p couldn't be started, status:%d\n",
 			 ioreq, retval);
 		CSIO_INC_STATS(scsim, n_busy_error);
 		goto err_put_req;
@@ -77,9 +77,9 @@ int cxgbi_device_portmap_create(struct cxgbi_device *cdev, unsigned int base,
 {
 	struct cxgbi_ports_map *pmap = &cdev->pmap;
 
-	pmap->port_csk = cxgbi_alloc_big_mem(max_conn *
-					     sizeof(struct cxgbi_sock *),
-					     GFP_KERNEL);
+	pmap->port_csk = kvzalloc(array_size(max_conn,
+					     sizeof(struct cxgbi_sock *)),
+				  GFP_KERNEL | __GFP_NOWARN);
 	if (!pmap->port_csk) {
 		pr_warn("cdev 0x%p, portmap OOM %u.\n", cdev, max_conn);
 		return -ENOMEM;
@@ -124,7 +124,7 @@ static inline void cxgbi_device_destroy(struct cxgbi_device *cdev)
 	if (cdev->cdev2ppm)
 		cxgbi_ppm_release(cdev->cdev2ppm(cdev));
 	if (cdev->pmap.max_connect)
-		cxgbi_free_big_mem(cdev->pmap.port_csk);
+		kvfree(cdev->pmap.port_csk);
 	kfree(cdev);
 }
 
@@ -575,22 +575,6 @@ struct cxgbi_iso_info {
 	u32 buffer_offset;
 };
 
-static inline void *cxgbi_alloc_big_mem(unsigned int size,
-					gfp_t gfp)
-{
-	void *p = kzalloc(size, gfp | __GFP_NOWARN);
-
-	if (!p)
-		p = vzalloc(size);
-
-	return p;
-}
-
-static inline void cxgbi_free_big_mem(void *addr)
-{
-	kvfree(addr);
-}
-
 static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
 {
 	if (chba->cdev->flags & CXGBI_FLAG_IPV4_SET)
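The removed cxgbi_alloc_big_mem()/cxgbi_free_big_mem() pair open-coded what kvzalloc() and kvfree() already provide: try a kmalloc-backed allocation first, fall back to vmalloc, and free correctly either way. A minimal sketch of the replacement pattern (hypothetical helpers, not from the driver):

	#include <linux/mm.h>
	#include <linux/overflow.h>
	#include <linux/slab.h>

	/* Allocate a possibly large, zeroed pointer table. */
	static void **alloc_port_table(unsigned int nr_entries)
	{
		/* array_size() saturates on multiplication overflow instead of wrapping. */
		return kvzalloc(array_size(nr_entries, sizeof(void *)),
				GFP_KERNEL | __GFP_NOWARN);
	}

	static void free_port_table(void **table)
	{
		kvfree(table);	/* valid for kmalloc- and vmalloc-backed memory */
	}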
@@ -902,7 +902,7 @@ static void build_srb(struct scsi_cmnd *cmd, struct DeviceCtlBlk *dcb,
 	nseg = scsi_dma_map(cmd);
 	BUG_ON(nseg < 0);
 
-	if (dir == PCI_DMA_NONE || !nseg) {
+	if (dir == DMA_NONE || !nseg) {
 		dprintkdbg(DBG_0,
 			"build_srb: [0] len=%d buf=%p use_sg=%d !MAP=%08x\n",
 			cmd->bufflen, scsi_sglist(cmd), scsi_sg_count(cmd),
@@ -3135,7 +3135,7 @@ static void pci_unmap_srb(struct AdapterCtlBlk *acb, struct ScsiReqBlk *srb)
 	struct scsi_cmnd *cmd = srb->cmd;
 	enum dma_data_direction dir = cmd->sc_data_direction;
 
-	if (scsi_sg_count(cmd) && dir != PCI_DMA_NONE) {
+	if (scsi_sg_count(cmd) && dir != DMA_NONE) {
 		/* unmap DC395x SG list */
 		dprintkdbg(DBG_SG, "pci_unmap_srb: list=%08x(%05x)\n",
 			srb->sg_bus_addr, SEGMENTX_LEN);
@@ -3333,7 +3333,7 @@ static void srb_done(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb,
 
 	if (!ckc_only && (cmd->result & RES_DID) == 0
 	    && cmd->cmnd[2] == 0 && scsi_bufflen(cmd) >= 8
-	    && dir != PCI_DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
+	    && dir != DMA_NONE && ptr && (ptr->Vers & 0x07) >= 2)
 		dcb->inquiry7 = ptr->Flags;
 
 /*if( srb->cmd->cmnd[0] == INQUIRY && */
@@ -4504,14 +4504,8 @@ static int dc395x_show_info(struct seq_file *m, struct Scsi_Host *host)
 	/*seq_printf(m, "\n"); */
 
 	seq_printf(m, "Nr of DCBs: %i\n", list_size(&acb->dcb_list));
-	seq_printf(m, "Map of attached LUNs: %02x %02x %02x %02x %02x %02x %02x %02x\n",
-		   acb->dcb_map[0], acb->dcb_map[1], acb->dcb_map[2],
-		   acb->dcb_map[3], acb->dcb_map[4], acb->dcb_map[5],
-		   acb->dcb_map[6], acb->dcb_map[7]);
-	seq_printf(m, "               %02x %02x %02x %02x %02x %02x %02x %02x\n",
-		   acb->dcb_map[8], acb->dcb_map[9], acb->dcb_map[10],
-		   acb->dcb_map[11], acb->dcb_map[12], acb->dcb_map[13],
-		   acb->dcb_map[14], acb->dcb_map[15]);
+	seq_printf(m, "Map of attached LUNs: %8ph\n", &acb->dcb_map[0]);
+	seq_printf(m, "                      %8ph\n", &acb->dcb_map[8]);
 
 	seq_puts(m,
 		 "Un ID LUN Prty Sync Wide DsCn SndS TagQ nego_period SyncFreq SyncOffs MaxCmd\n");
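The collapsed seq_printf() calls rely on the kernel's %ph printf extension, which hex-dumps a small buffer with space separators; %8ph emits exactly eight bytes. A tiny sketch of the specifier with a made-up buffer (not driver code):

	#include <linux/types.h>
	#include <linux/printk.h>

	static void dump_lun_map_example(const u8 *map)
	{
		/* Prints e.g. "01 02 03 04 05 06 07 08" for each half. */
		pr_info("Map of attached LUNs: %8ph\n", &map[0]);
		pr_info("                      %8ph\n", &map[8]);
	}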
@@ -408,9 +408,6 @@ static void adpt_inquiry(adpt_hba* pHba)
 static int adpt_slave_configure(struct scsi_device * device)
 {
 	struct Scsi_Host *host = device->host;
-	adpt_hba* pHba;
-
-	pHba = (adpt_hba *) host->hostdata[0];
 
 	if (host->can_queue && device->tagged_supported) {
 		scsi_change_queue_depth(device,
@@ -1548,11 +1548,10 @@ static int allocate_fw_buffers(struct esas2r_adapter *a, u32 length)
 
 	a->firmware.orig_len = length;
 
-	a->firmware.data = (u8 *)dma_alloc_coherent(&a->pcid->dev,
-						    (size_t)length,
-						    (dma_addr_t *)&a->firmware.
-						    phys,
-						    GFP_KERNEL);
+	a->firmware.data = dma_alloc_coherent(&a->pcid->dev,
+					      (size_t)length,
+					      (dma_addr_t *)&a->firmware.phys,
+					      GFP_KERNEL);
 
 	if (!a->firmware.data) {
 		esas2r_debug("buffer alloc failed!");
@@ -1895,11 +1894,11 @@ int esas2r_write_vda(struct esas2r_adapter *a, const char *buf, long off,
 
 	if (!a->vda_buffer) {
 		dma_addr_t dma_addr;
-		a->vda_buffer = (u8 *)dma_alloc_coherent(&a->pcid->dev,
-							 (size_t)
-							 VDA_MAX_BUFFER_SIZE,
-							 &dma_addr,
-							 GFP_KERNEL);
+		a->vda_buffer = dma_alloc_coherent(&a->pcid->dev,
+						   (size_t)
+						   VDA_MAX_BUFFER_SIZE,
+						   &dma_addr,
+						   GFP_KERNEL);
 
 		a->ppvda_buffer = dma_addr;
 	}
@@ -2064,11 +2063,10 @@ int esas2r_write_fs(struct esas2r_adapter *a, const char *buf, long off,
 re_allocate_buffer:
 		a->fs_api_buffer_size = length;
 
-		a->fs_api_buffer = (u8 *)dma_alloc_coherent(
-					&a->pcid->dev,
-					(size_t)a->fs_api_buffer_size,
-					(dma_addr_t *)&a->ppfs_api_buffer,
-					GFP_KERNEL);
+		a->fs_api_buffer = dma_alloc_coherent(&a->pcid->dev,
+					(size_t)a->fs_api_buffer_size,
+					(dma_addr_t *)&a->ppfs_api_buffer,
+					GFP_KERNEL);
 		}
 	}
 
@@ -111,12 +111,11 @@ static int fdomain_isa_match(struct device *dev, unsigned int ndev)
 		base = readb(p + sig->base_offset) +
 		       (readb(p + sig->base_offset + 1) << 8);
 	iounmap(p);
-	if (base)
+	if (base) {
 		dev_info(dev, "BIOS at 0x%lx specifies I/O base 0x%x\n",
 			 bios_base, base);
-	else
+	} else {	/* no I/O base in BIOS area */
 		dev_info(dev, "BIOS at 0x%lx\n", bios_base);
-	if (!base) {	/* no I/O base in BIOS area */
 		/* save BIOS signature for later use in port probing */
 		saved_sig = sig;
 		return 0;
@@ -120,11 +120,11 @@ static ssize_t fnic_trace_ctrl_read(struct file *filp,
 	len = 0;
 	trace_type = (u8 *)filp->private_data;
 	if (*trace_type == fc_trc_flag->fnic_trace)
-		len = sprintf(buf, "%u\n", fnic_tracing_enabled);
+		len = sprintf(buf, "%d\n", fnic_tracing_enabled);
 	else if (*trace_type == fc_trc_flag->fc_trace)
-		len = sprintf(buf, "%u\n", fnic_fc_tracing_enabled);
+		len = sprintf(buf, "%d\n", fnic_fc_tracing_enabled);
 	else if (*trace_type == fc_trc_flag->fc_clear)
-		len = sprintf(buf, "%u\n", fnic_fc_trace_cleared);
+		len = sprintf(buf, "%d\n", fnic_fc_trace_cleared);
 	else
 		pr_err("fnic: Cannot read to any debugfs file\n");
 
@@ -309,12 +309,10 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
 	struct fc_frame_header *fh = NULL;
 	struct fip_desc *desc;
 	struct fip_encaps *els;
-	enum fip_desc_type els_dtype = 0;
 	u16 op;
 	u8 els_op;
 	u8 sub;
 
-	size_t els_len = 0;
 	size_t rlen;
 	size_t dlen = 0;
 
@@ -346,10 +344,8 @@ static inline int is_fnic_fip_flogi_reject(struct fcoe_ctlr *fip,
 	if (dlen < sizeof(*els) + sizeof(*fh) + 1)
 		return 0;
 
-	els_len = dlen - sizeof(*els);
 	els = (struct fip_encaps *)desc;
 	fh = (struct fc_frame_header *)(els + 1);
-	els_dtype = desc->fip_dtype;
 
 	if (!fh)
 		return 0;
@@ -376,7 +372,6 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
 	struct sk_buff *skb;
 	char *eth_fr;
-	int fr_len;
 	struct fip_vlan *vlan;
 	u64 vlan_tov;
 
@@ -391,7 +386,6 @@ static void fnic_fcoe_send_vlan_req(struct fnic *fnic)
 	if (!skb)
 		return;
 
-	fr_len = sizeof(*vlan);
 	eth_fr = (char *)skb->data;
 	vlan = (struct fip_vlan *)eth_fr;
 
@@ -837,7 +831,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 	struct sk_buff *skb;
 	struct fc_frame *fp;
 	struct fnic_stats *fnic_stats = &fnic->fnic_stats;
-	unsigned int eth_hdrs_stripped;
 	u8 type, color, eop, sop, ingress_port, vlan_stripped;
 	u8 fcoe = 0, fcoe_sof, fcoe_eof;
 	u8 fcoe_fc_crc_ok = 1, fcoe_enc_error = 0;
@@ -867,7 +860,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 			&ingress_port, &packet_error,
 			&fcoe_enc_error, &fcs_ok, &vlan_stripped,
 			&vlan);
-		eth_hdrs_stripped = 1;
 		skb_trim(skb, fcp_bytes_written);
 		fr_sof(fp) = sof;
 		fr_eof(fp) = eof;
@@ -884,7 +876,6 @@ static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc
 			&tcp_udp_csum_ok, &udp, &tcp,
 			&ipv4_csum_ok, &ipv6, &ipv4,
 			&ipv4_fragment, &fcs_ok);
-		eth_hdrs_stripped = 0;
 		skb_trim(skb, bytes_written);
 		if (!fcs_ok) {
 			atomic64_inc(&fnic_stats->misc_stats.frame_errors);
@@ -443,7 +443,7 @@ static void fnic_notify_timer_start(struct fnic *fnic)
 	default:
 		/* Using intr for notification for INTx/MSI-X */
 		break;
-	};
+	}
 }
 
 static int fnic_dev_wait(struct vnic_dev *vdev,
@@ -552,8 +552,7 @@ static u8 *fnic_get_mac(struct fc_lport *lport)
 
 static void fnic_set_vlan(struct fnic *fnic, u16 vlan_id)
 {
-	u16 old_vlan;
-	old_vlan = vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
+	vnic_dev_set_default_vlan(fnic->vdev, vlan_id);
 }
 
 static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1402,7 +1402,7 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
 		}
 		if (!io_req) {
 			spin_unlock_irqrestore(io_lock, flags);
-			goto cleanup_scsi_cmd;
+			continue;
 		}
 
 		CMD_SP(sc) = NULL;
@@ -1417,7 +1417,6 @@ static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
 		fnic_release_ioreq_buf(fnic, io_req, sc);
 		mempool_free(io_req, fnic->io_req_pool);
 
-cleanup_scsi_cmd:
 		sc->result = DID_TRANSPORT_DISRUPTED << 16;
 		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
 			      "%s: tag:0x%x : sc:0x%p duration = %lu DID_TRANSPORT_DISRUPTED\n",
@@ -3007,7 +3007,6 @@ static char *async_cache_tab[] = {
 static int gdth_async_event(gdth_ha_str *ha)
 {
 	gdth_cmd_str *cmdp;
-	int cmd_index;
 
 	cmdp= ha->pccb;
 	TRACE2(("gdth_async_event() ha %d serv %d\n",
@@ -3019,7 +3018,6 @@ static int gdth_async_event(gdth_ha_str *ha)
 		gdth_delay(0);
 		cmdp->Service = SCREENSERVICE;
 		cmdp->RequestBuffer = SCREEN_CMND;
-		cmd_index = gdth_get_cmd_index(ha);
 		gdth_set_sema0(ha);
 		cmdp->OpCode = GDT_READ;
 		cmdp->BoardNode = LOCALBOARD;
@@ -15,5 +15,6 @@ config SCSI_HISI_SAS_PCI
 	tristate "HiSilicon SAS on PCI bus"
 	depends on SCSI_HISI_SAS
 	depends on PCI
+	depends on ACPI
 	help
 	  This driver supports HiSilicon's SAS HBA based on PCI device
@@ -21,6 +21,7 @@
 #include <linux/of_address.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
 #include <linux/property.h>
 #include <linux/regmap.h>
 #include <linux/timer.h>
@@ -34,6 +35,7 @@
 #define HISI_SAS_MAX_DEVICES HISI_SAS_MAX_ITCT_ENTRIES
 #define HISI_SAS_RESET_BIT	0
 #define HISI_SAS_REJECT_CMD_BIT	1
+#define HISI_SAS_PM_BIT		2
 #define HISI_SAS_MAX_COMMANDS (HISI_SAS_QUEUE_SLOTS)
 #define HISI_SAS_RESERVED_IPTT	96
 #define HISI_SAS_UNRESERVED_IPTT \
@@ -275,6 +277,39 @@ enum hisi_sas_debugfs_cache_type {
 	HISI_SAS_IOST_CACHE,
 };
 
+enum hisi_sas_debugfs_bist_ffe_cfg {
+	FFE_SAS_1_5_GBPS,
+	FFE_SAS_3_0_GBPS,
+	FFE_SAS_6_0_GBPS,
+	FFE_SAS_12_0_GBPS,
+	FFE_RESV,
+	FFE_SATA_1_5_GBPS,
+	FFE_SATA_3_0_GBPS,
+	FFE_SATA_6_0_GBPS,
+	FFE_CFG_MAX
+};
+
+enum hisi_sas_debugfs_bist_fixed_code {
+	FIXED_CODE,
+	FIXED_CODE_1,
+	FIXED_CODE_MAX
+};
+
+enum {
+	HISI_SAS_BIST_CODE_MODE_PRBS7,
+	HISI_SAS_BIST_CODE_MODE_PRBS23,
+	HISI_SAS_BIST_CODE_MODE_PRBS31,
+	HISI_SAS_BIST_CODE_MODE_JTPAT,
+	HISI_SAS_BIST_CODE_MODE_CJTPAT,
+	HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
+	HISI_SAS_BIST_CODE_MODE_TRAIN,
+	HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
+	HISI_SAS_BIST_CODE_MODE_HFTP,
+	HISI_SAS_BIST_CODE_MODE_MFTP,
+	HISI_SAS_BIST_CODE_MODE_LFTP,
+	HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
+};
+
 struct hisi_sas_hw {
 	int (*hw_init)(struct hisi_hba *hisi_hba);
 	void (*setup_itct)(struct hisi_hba *hisi_hba,
@@ -441,6 +476,8 @@ struct hisi_hba {
 	int debugfs_bist_mode;
 	u32 debugfs_bist_cnt;
 	int debugfs_bist_enable;
+	u32 debugfs_bist_ffe[HISI_SAS_MAX_PHYS][FFE_CFG_MAX];
+	u32 debugfs_bist_fixed_code[FIXED_CODE_MAX];
 
 	/* debugfs memories */
 	/* Put Global AXI and RAS Register into register array */
@@ -229,17 +229,18 @@ void hisi_sas_slot_task_free(struct hisi_hba *hisi_hba, struct sas_task *task,
 		task->lldd_task = NULL;
 
 		if (!sas_protocol_ata(task->task_proto)) {
-			struct sas_ssp_task *ssp_task = &task->ssp_task;
-			struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
-
 			if (slot->n_elem)
 				dma_unmap_sg(dev, task->scatter,
 					     task->num_scatter,
 					     task->data_dir);
-			if (slot->n_elem_dif)
+			if (slot->n_elem_dif) {
+				struct sas_ssp_task *ssp_task = &task->ssp_task;
+				struct scsi_cmnd *scsi_cmnd = ssp_task->cmd;
+
 				dma_unmap_sg(dev, scsi_prot_sglist(scsi_cmnd),
 					     scsi_prot_sg_count(scsi_cmnd),
 					     task->data_dir);
+			}
 		}
 	}
 
@@ -334,7 +335,7 @@ static int hisi_sas_dma_map(struct hisi_hba *hisi_hba,
 	}
 
 	if (*n_elem > HISI_SAS_SGE_PAGE_CNT) {
-		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT",
+		dev_err(dev, "task prep: n_elem(%d) > HISI_SAS_SGE_PAGE_CNT\n",
 			*n_elem);
 		rc = -EINVAL;
 		goto err_out_dma_unmap;
@@ -620,6 +621,12 @@ static void hisi_sas_bytes_dmaed(struct hisi_hba *hisi_hba, int phy_no)
 	if (!phy->phy_attached)
 		return;
 
+	if (test_bit(HISI_SAS_PM_BIT, &hisi_hba->flags) &&
+	    !sas_phy->suspended) {
+		dev_warn(hisi_hba->dev, "phy%d during suspend filtered out\n", phy_no);
+		return;
+	}
+
 	sas_ha = &hisi_hba->sha;
 	sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
 
@@ -1431,7 +1438,6 @@ static void hisi_sas_rescan_topology(struct hisi_hba *hisi_hba, u32 state)
 		} else {
 			hisi_sas_phy_down(hisi_hba, phy_no, 0);
 		}
-
 	}
 }
 
@@ -1547,7 +1553,6 @@ EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_prepare);
 void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
 {
 	struct Scsi_Host *shost = hisi_hba->shost;
-	u32 state;
 
 	/* Init and wait for PHYs to come up and all libsas event finished. */
 	hisi_hba->hw->phys_init(hisi_hba);
@@ -1562,8 +1567,7 @@ void hisi_sas_controller_reset_done(struct hisi_hba *hisi_hba)
 	scsi_unblock_requests(shost);
 	clear_bit(HISI_SAS_RESET_BIT, &hisi_hba->flags);
 
-	state = hisi_hba->hw->get_phys_state(hisi_hba);
-	hisi_sas_rescan_topology(hisi_hba, state);
+	hisi_sas_rescan_topology(hisi_hba, hisi_hba->phy_state);
 }
 EXPORT_SYMBOL_GPL(hisi_sas_controller_reset_done);
 
@@ -3335,21 +3339,6 @@ enum {
 	HISI_SAS_BIST_LOOPBACK_MODE_REMOTE,
 };
 
-enum {
-	HISI_SAS_BIST_CODE_MODE_PRBS7 = 0,
-	HISI_SAS_BIST_CODE_MODE_PRBS23,
-	HISI_SAS_BIST_CODE_MODE_PRBS31,
-	HISI_SAS_BIST_CODE_MODE_JTPAT,
-	HISI_SAS_BIST_CODE_MODE_CJTPAT,
-	HISI_SAS_BIST_CODE_MODE_SCRAMBED_0,
-	HISI_SAS_BIST_CODE_MODE_TRAIN,
-	HISI_SAS_BIST_CODE_MODE_TRAIN_DONE,
-	HISI_SAS_BIST_CODE_MODE_HFTP,
-	HISI_SAS_BIST_CODE_MODE_MFTP,
-	HISI_SAS_BIST_CODE_MODE_LFTP,
-	HISI_SAS_BIST_CODE_MODE_FIXED_DATA,
-};
-
 static const struct {
 	int value;
 	char *name;
@@ -3705,6 +3694,58 @@ static const struct file_operations hisi_sas_debugfs_bist_enable_ops = {
 	.owner = THIS_MODULE,
 };
 
+static const struct {
+	char *name;
+} hisi_sas_debugfs_ffe_name[FFE_CFG_MAX] = {
+	{ "SAS_1_5_GBPS" },
+	{ "SAS_3_0_GBPS" },
+	{ "SAS_6_0_GBPS" },
+	{ "SAS_12_0_GBPS" },
+	{ "FFE_RESV" },
+	{ "SATA_1_5_GBPS" },
+	{ "SATA_3_0_GBPS" },
+	{ "SATA_6_0_GBPS" },
+};
+
+static ssize_t hisi_sas_debugfs_write(struct file *filp,
+				      const char __user *buf,
+				      size_t count, loff_t *ppos)
+{
+	struct seq_file *m = filp->private_data;
+	u32 *val = m->private;
+	int res;
+
+	res = kstrtouint_from_user(buf, count, 0, val);
+	if (res)
+		return res;
+
+	return count;
+}
+
+static int hisi_sas_debugfs_show(struct seq_file *s, void *p)
+{
+	u32 *val = s->private;
+
+	seq_printf(s, "0x%x\n", *val);
+
+	return 0;
+}
+
+static int hisi_sas_debugfs_open(struct inode *inode, struct file *filp)
+{
+	return single_open(filp, hisi_sas_debugfs_show,
+			   inode->i_private);
+}
+
+static const struct file_operations hisi_sas_debugfs_ops = {
+	.open = hisi_sas_debugfs_open,
+	.read = seq_read,
+	.write = hisi_sas_debugfs_write,
+	.llseek = seq_lseek,
+	.release = single_release,
+	.owner = THIS_MODULE,
+};
+
 static ssize_t hisi_sas_debugfs_phy_down_cnt_write(struct file *filp,
 						   const char __user *buf,
 						   size_t count, loff_t *ppos)
@@ -3902,6 +3943,9 @@ static void hisi_sas_debugfs_phy_down_cnt_init(struct hisi_hba *hisi_hba)
 
 static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
 {
+	struct dentry *ports_dentry;
+	int phy_no;
+
 	hisi_hba->debugfs_bist_dentry =
 		debugfs_create_dir("bist", hisi_hba->debugfs_dir);
 	debugfs_create_file("link_rate", 0600,
@@ -3912,6 +3956,16 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
 			    hisi_hba->debugfs_bist_dentry, hisi_hba,
 			    &hisi_sas_debugfs_bist_code_mode_ops);
 
+	debugfs_create_file("fixed_code", 0600,
+			    hisi_hba->debugfs_bist_dentry,
+			    &hisi_hba->debugfs_bist_fixed_code[0],
+			    &hisi_sas_debugfs_ops);
+
+	debugfs_create_file("fixed_code_1", 0600,
+			    hisi_hba->debugfs_bist_dentry,
+			    &hisi_hba->debugfs_bist_fixed_code[1],
+			    &hisi_sas_debugfs_ops);
+
 	debugfs_create_file("phy_id", 0600, hisi_hba->debugfs_bist_dentry,
 			    hisi_hba, &hisi_sas_debugfs_bist_phy_ops);
 
@@ -3925,6 +3979,27 @@ static void hisi_sas_debugfs_bist_init(struct hisi_hba *hisi_hba)
 	debugfs_create_file("enable", 0600, hisi_hba->debugfs_bist_dentry,
 			    hisi_hba, &hisi_sas_debugfs_bist_enable_ops);
 
+	ports_dentry = debugfs_create_dir("port", hisi_hba->debugfs_bist_dentry);
+
+	for (phy_no = 0; phy_no < hisi_hba->n_phy; phy_no++) {
+		struct dentry *port_dentry;
+		struct dentry *ffe_dentry;
+		char name[256];
+		int i;
+
+		snprintf(name, 256, "%d", phy_no);
+		port_dentry = debugfs_create_dir(name, ports_dentry);
+		ffe_dentry = debugfs_create_dir("ffe", port_dentry);
+		for (i = 0; i < FFE_CFG_MAX; i++) {
+			if (i == FFE_RESV)
+				continue;
+			debugfs_create_file(hisi_sas_debugfs_ffe_name[i].name,
+					    0600, ffe_dentry,
+					    &hisi_hba->debugfs_bist_ffe[phy_no][i],
+					    &hisi_sas_debugfs_ops);
+		}
+	}
+
 	hisi_hba->debugfs_bist_linkrate = SAS_LINK_RATE_1_5_GBPS;
 }
 
@@ -752,7 +752,7 @@ static int hw_init_v1_hw(struct hisi_hba *hisi_hba)
 
 	rc = reset_hw_v1_hw(hisi_hba);
 	if (rc) {
-		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
 		return rc;
 	}
 
@@ -1166,7 +1166,7 @@ static void slot_err_v1_hw(struct hisi_hba *hisi_hba,
 	case SAS_PROTOCOL_STP:
 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
 	{
-		dev_err(dev, "slot err: SATA/STP not supported");
+		dev_err(dev, "slot err: SATA/STP not supported\n");
 	}
 		break;
 	default:
@@ -1218,35 +1218,35 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
 		u32 info_reg = hisi_sas_read32(hisi_hba, HGC_INVLD_DQE_INFO);
 
 		if (info_reg & HGC_INVLD_DQE_INFO_DQ_MSK)
-			dev_err(dev, "slot complete: [%d:%d] has dq IPTT err",
+			dev_err(dev, "slot complete: [%d:%d] has dq IPTT err\n",
 				slot->cmplt_queue, slot->cmplt_queue_slot);
 
 		if (info_reg & HGC_INVLD_DQE_INFO_TYPE_MSK)
-			dev_err(dev, "slot complete: [%d:%d] has dq type err",
+			dev_err(dev, "slot complete: [%d:%d] has dq type err\n",
 				slot->cmplt_queue, slot->cmplt_queue_slot);
 
 		if (info_reg & HGC_INVLD_DQE_INFO_FORCE_MSK)
-			dev_err(dev, "slot complete: [%d:%d] has dq force phy err",
+			dev_err(dev, "slot complete: [%d:%d] has dq force phy err\n",
 				slot->cmplt_queue, slot->cmplt_queue_slot);
 
 		if (info_reg & HGC_INVLD_DQE_INFO_PHY_MSK)
-			dev_err(dev, "slot complete: [%d:%d] has dq phy id err",
+			dev_err(dev, "slot complete: [%d:%d] has dq phy id err\n",
 				slot->cmplt_queue, slot->cmplt_queue_slot);
 
 		if (info_reg & HGC_INVLD_DQE_INFO_ABORT_MSK)
-			dev_err(dev, "slot complete: [%d:%d] has dq abort flag err",
+			dev_err(dev, "slot complete: [%d:%d] has dq abort flag err\n",
 				slot->cmplt_queue, slot->cmplt_queue_slot);
 
 		if (info_reg & HGC_INVLD_DQE_INFO_IPTT_OF_MSK)
-			dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err",
+			dev_err(dev, "slot complete: [%d:%d] has dq IPTT or ICT err\n",
 				slot->cmplt_queue, slot->cmplt_queue_slot);
 
 		if (info_reg & HGC_INVLD_DQE_INFO_SSP_ERR_MSK)
-			dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err",
+			dev_err(dev, "slot complete: [%d:%d] has dq SSP frame type err\n",
 				slot->cmplt_queue, slot->cmplt_queue_slot);
 
 		if (info_reg & HGC_INVLD_DQE_INFO_OFL_MSK)
-			dev_err(dev, "slot complete: [%d:%d] has dq order frame len err",
+			dev_err(dev, "slot complete: [%d:%d] has dq order frame len err\n",
 				slot->cmplt_queue, slot->cmplt_queue_slot);
 
 		ts->stat = SAS_OPEN_REJECT;
@@ -1294,7 +1294,7 @@ static void slot_complete_v1_hw(struct hisi_hba *hisi_hba,
 	case SAS_PROTOCOL_SATA:
 	case SAS_PROTOCOL_STP:
 	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-		dev_err(dev, "slot complete: SATA/STP not supported");
+		dev_err(dev, "slot complete: SATA/STP not supported\n");
 		break;
 
 	default:
@@ -1417,7 +1417,7 @@ static irqreturn_t int_bcast_v1_hw(int irq, void *p)
 	irq_value = hisi_sas_phy_read32(hisi_hba, phy_no, CHL_INT2);
 
 	if (!(irq_value & CHL_INT2_SL_RX_BC_ACK_MSK)) {
-		dev_err(dev, "bcast: irq_value = %x not set enable bit",
+		dev_err(dev, "bcast: irq_value = %x not set enable bit\n",
 			irq_value);
 		res = IRQ_NONE;
 		goto end;
@@ -1202,7 +1202,7 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK3, 0x7ffe20fe);
 	hisi_sas_write32(hisi_hba, SAS_ECC_INTR_MSK, 0xfff00c30);
 	for (i = 0; i < hisi_hba->queue_count; i++)
-		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
 
 	hisi_sas_write32(hisi_hba, AXI_AHB_CLK_CFG, 1);
 	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
@@ -1382,7 +1382,7 @@ static int hw_init_v2_hw(struct hisi_hba *hisi_hba)
 
 	rc = reset_hw_v2_hw(hisi_hba);
 	if (rc) {
-		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
+		dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
 		return rc;
 	}
 
@@ -191,8 +191,10 @@
 #define PHY_CFG_PHY_RST_OFF		3
 #define PHY_CFG_PHY_RST_MSK		(0x1 << PHY_CFG_PHY_RST_OFF)
 #define PROG_PHY_LINK_RATE		(PORT_BASE + 0x8)
-#define CFG_PROG_PHY_LINK_RATE_OFF	8
-#define CFG_PROG_PHY_LINK_RATE_MSK	(0xf << CFG_PROG_PHY_LINK_RATE_OFF)
+#define CFG_PROG_PHY_LINK_RATE_OFF	0
+#define CFG_PROG_PHY_LINK_RATE_MSK	(0xff << CFG_PROG_PHY_LINK_RATE_OFF)
+#define CFG_PROG_OOB_PHY_LINK_RATE_OFF	8
+#define CFG_PROG_OOB_PHY_LINK_RATE_MSK	(0xf << CFG_PROG_OOB_PHY_LINK_RATE_OFF)
 #define PHY_CTRL			(PORT_BASE + 0x14)
 #define PHY_CTRL_RESET_OFF		0
 #define PHY_CTRL_RESET_MSK		(0x1 << PHY_CTRL_RESET_OFF)
@@ -295,6 +297,7 @@
 #define DMA_RX_STATUS_BUSY_MSK		(0x1 << DMA_RX_STATUS_BUSY_OFF)
 
 #define COARSETUNE_TIME			(PORT_BASE + 0x304)
+#define TXDEEMPH_G1			(PORT_BASE + 0x350)
 #define ERR_CNT_DWS_LOST		(PORT_BASE + 0x380)
 #define ERR_CNT_RESET_PROB		(PORT_BASE + 0x384)
 #define ERR_CNT_INVLD_DW		(PORT_BASE + 0x390)
@@ -565,7 +568,7 @@ static u32 hisi_sas_phy_read32(struct hisi_hba *hisi_hba,
 
 static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 {
-	int i;
+	int i, j;
 
 	/* Global registers init */
 	hisi_sas_write32(hisi_hba, DLVRY_QUEUE_ENABLE,
@@ -593,25 +596,24 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
 	hisi_sas_write32(hisi_hba, AWQOS_AWCACHE_CFG, 0xf0f0);
 	hisi_sas_write32(hisi_hba, ARQOS_ARCACHE_CFG, 0xf0f0);
 	for (i = 0; i < hisi_hba->queue_count; i++)
-		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK+0x4*i, 0);
+		hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0);
 
 	hisi_sas_write32(hisi_hba, HYPER_STREAM_ID_EN_CFG, 1);
 
 	for (i = 0; i < hisi_hba->n_phy; i++) {
+		enum sas_linkrate max;
 		struct hisi_sas_phy *phy = &hisi_hba->phy[i];
 		struct asd_sas_phy *sas_phy = &phy->sas_phy;
-		u32 prog_phy_link_rate = 0x800;
+		u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, i,
+							     PROG_PHY_LINK_RATE);
 
+		prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
 		if (!sas_phy->phy || (sas_phy->phy->maximum_linkrate <
-				SAS_LINK_RATE_1_5_GBPS)) {
-			prog_phy_link_rate = 0x855;
-		} else {
-			enum sas_linkrate max = sas_phy->phy->maximum_linkrate;
-
-			prog_phy_link_rate =
-				hisi_sas_get_prog_phy_linkrate_mask(max) |
-				0x800;
-		}
+				SAS_LINK_RATE_1_5_GBPS))
+			max = SAS_LINK_RATE_12_0_GBPS;
+		else
+			max = sas_phy->phy->maximum_linkrate;
+		prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
 		hisi_sas_phy_write32(hisi_hba, i, PROG_PHY_LINK_RATE,
 				     prog_phy_link_rate);
 		hisi_sas_phy_write32(hisi_hba, i, SERDES_CFG, 0xffc00);
@ -636,6 +638,13 @@ static void init_reg_v3_hw(struct hisi_hba *hisi_hba)
|
|||
/* used for 12G negotiate */
|
||||
hisi_sas_phy_write32(hisi_hba, i, COARSETUNE_TIME, 0x1e);
|
||||
hisi_sas_phy_write32(hisi_hba, i, AIP_LIMIT, 0x2ffff);
|
||||
|
||||
/* get default FFE configuration for BIST */
|
||||
for (j = 0; j < FFE_CFG_MAX; j++) {
|
||||
u32 val = hisi_sas_phy_read32(hisi_hba, i,
|
||||
TXDEEMPH_G1 + (j * 0x4));
|
||||
hisi_hba->debugfs_bist_ffe[i][j] = val;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < hisi_hba->queue_count; i++) {
|
||||
|
@ -894,13 +903,14 @@ static int reset_hw_v3_hw(struct hisi_hba *hisi_hba)
|
|||
static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
struct device *dev = hisi_hba->dev;
|
||||
struct acpi_device *acpi_dev;
|
||||
union acpi_object *obj;
|
||||
guid_t guid;
|
||||
int rc;
|
||||
|
||||
rc = reset_hw_v3_hw(hisi_hba);
|
||||
if (rc) {
|
||||
dev_err(dev, "hisi_sas_reset_hw failed, rc=%d", rc);
|
||||
dev_err(dev, "hisi_sas_reset_hw failed, rc=%d\n", rc);
|
||||
return rc;
|
||||
}
|
||||
|
||||
|
@ -924,6 +934,9 @@ static int hw_init_v3_hw(struct hisi_hba *hisi_hba)
|
|||
else
|
||||
ACPI_FREE(obj);
|
||||
|
||||
acpi_dev = ACPI_COMPANION(dev);
|
||||
if (!acpi_device_power_manageable(acpi_dev))
|
||||
dev_notice(dev, "neither _PS0 nor _PR0 is defined\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1341,7 +1354,6 @@ static void prep_smp_v3_hw(struct hisi_hba *hisi_hba,
|
|||
|
||||
hdr->cmd_table_addr = cpu_to_le64(req_dma_addr);
|
||||
hdr->sts_buffer_addr = cpu_to_le64(hisi_sas_status_buf_addr_dma(slot));
|
||||
|
||||
}
|
||||
|
||||
static void prep_ata_v3_hw(struct hisi_hba *hisi_hba,
|
||||
|
@ -1447,7 +1459,6 @@ static void prep_abort_v3_hw(struct hisi_hba *hisi_hba,
|
|||
/* dw7 */
|
||||
hdr->dw7 = cpu_to_le32(tag_to_abort << CMD_HDR_ABORT_IPTT_OFF);
|
||||
hdr->transfer_tags = cpu_to_le32(slot->idx);
|
||||
|
||||
}
|
||||
|
||||
static irqreturn_t phy_up_v3_hw(int phy_no, struct hisi_hba *hisi_hba)
|
||||
|
@ -2469,8 +2480,10 @@ static void phy_set_linkrate_v3_hw(struct hisi_hba *hisi_hba, int phy_no,
|
|||
struct sas_phy_linkrates *r)
|
||||
{
|
||||
enum sas_linkrate max = r->maximum_linkrate;
|
||||
u32 prog_phy_link_rate = 0x800;
|
||||
u32 prog_phy_link_rate = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
PROG_PHY_LINK_RATE);
|
||||
|
||||
prog_phy_link_rate &= ~CFG_PROG_PHY_LINK_RATE_MSK;
|
||||
prog_phy_link_rate |= hisi_sas_get_prog_phy_linkrate_mask(max);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
|
||||
prog_phy_link_rate);
|
||||
|
@ -2484,10 +2497,11 @@ static void interrupt_disable_v3_hw(struct hisi_hba *hisi_hba)
|
|||
synchronize_irq(pci_irq_vector(pdev, 1));
|
||||
synchronize_irq(pci_irq_vector(pdev, 2));
|
||||
synchronize_irq(pci_irq_vector(pdev, 11));
|
||||
for (i = 0; i < hisi_hba->queue_count; i++) {
|
||||
for (i = 0; i < hisi_hba->queue_count; i++)
|
||||
hisi_sas_write32(hisi_hba, OQ0_INT_SRC_MSK + 0x4 * i, 0x1);
|
||||
|
||||
for (i = 0; i < hisi_hba->cq_nvecs; i++)
|
||||
synchronize_irq(pci_irq_vector(pdev, i + 16));
|
||||
}
|
||||
|
||||
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK1, 0xffffffff);
|
||||
hisi_sas_write32(hisi_hba, ENT_INT_SRC_MSK2, 0xffffffff);
|
||||
|
@ -2712,6 +2726,33 @@ static ssize_t intr_coal_count_v3_hw_store(struct device *dev,
|
|||
}
|
||||
static DEVICE_ATTR_RW(intr_coal_count_v3_hw);
|
||||
|
||||
static int slave_configure_v3_hw(struct scsi_device *sdev)
|
||||
{
|
||||
struct Scsi_Host *shost = dev_to_shost(&sdev->sdev_gendev);
|
||||
struct domain_device *ddev = sdev_to_domain_dev(sdev);
|
||||
struct hisi_hba *hisi_hba = shost_priv(shost);
|
||||
struct device *dev = hisi_hba->dev;
|
||||
int ret = sas_slave_configure(sdev);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
if (!dev_is_sata(ddev))
|
||||
sas_change_queue_depth(sdev, 64);
|
||||
|
||||
if (sdev->type == TYPE_ENCLOSURE)
|
||||
return 0;
|
||||
|
||||
if (!device_link_add(&sdev->sdev_gendev, dev,
|
||||
DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE)) {
|
||||
if (pm_runtime_enabled(dev)) {
|
||||
dev_info(dev, "add device link failed, disable runtime PM for the host\n");
|
||||
pm_runtime_disable(dev);
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct device_attribute *host_attrs_v3_hw[] = {
|
||||
&dev_attr_phy_event_threshold,
|
||||
&dev_attr_intr_conv_v3_hw,
|
||||
|
@ -2936,42 +2977,48 @@ static void read_iost_itct_cache_v3_hw(struct hisi_hba *hisi_hba,
|
|||
static void hisi_sas_bist_test_prep_v3_hw(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
u32 reg_val;
|
||||
int phy_id = hisi_hba->debugfs_bist_phy_no;
|
||||
int phy_no = hisi_hba->debugfs_bist_phy_no;
|
||||
int i;
|
||||
|
||||
/* disable PHY */
|
||||
hisi_sas_phy_enable(hisi_hba, phy_id, 0);
|
||||
hisi_sas_phy_enable(hisi_hba, phy_no, 0);
|
||||
|
||||
/* update FFE */
|
||||
for (i = 0; i < FFE_CFG_MAX; i++)
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, TXDEEMPH_G1 + (i * 0x4),
|
||||
hisi_hba->debugfs_bist_ffe[phy_no][i]);
|
||||
|
||||
/* disable ALOS */
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
|
||||
reg_val |= CFG_ALOS_CHK_DISABLE_MSK;
|
||||
hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
|
||||
}
|
||||
|
||||
static void hisi_sas_bist_test_restore_v3_hw(struct hisi_hba *hisi_hba)
|
||||
{
|
||||
u32 reg_val;
|
||||
int phy_id = hisi_hba->debugfs_bist_phy_no;
|
||||
int phy_no = hisi_hba->debugfs_bist_phy_no;
|
||||
|
||||
/* disable loopback */
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL);
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL);
|
||||
reg_val &= ~(CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
|
||||
CFG_BIST_TEST_MSK);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_id, SAS_PHY_BIST_CTRL, reg_val);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL, reg_val);
|
||||
|
||||
/* enable ALOS */
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, SERDES_CFG);
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, SERDES_CFG);
|
||||
reg_val &= ~CFG_ALOS_CHK_DISABLE_MSK;
|
||||
hisi_sas_phy_write32(hisi_hba, phy_id, SERDES_CFG, reg_val);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, SERDES_CFG, reg_val);
|
||||
|
||||
/* restore the linkrate */
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_id, PROG_PHY_LINK_RATE);
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no, PROG_PHY_LINK_RATE);
|
||||
/* init OOB link rate as 1.5 Gbits */
|
||||
reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
|
||||
reg_val |= (0x8 << CFG_PROG_PHY_LINK_RATE_OFF);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_id, PROG_PHY_LINK_RATE, reg_val);
|
||||
reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
|
||||
reg_val |= (0x8 << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE, reg_val);
|
||||
|
||||
/* enable PHY */
|
||||
hisi_sas_phy_enable(hisi_hba, phy_id, 1);
|
||||
hisi_sas_phy_enable(hisi_hba, phy_no, 1);
|
||||
}
|
||||
|
||||
#define SAS_PHY_BIST_CODE_INIT 0x1
|
||||
|
@ -2980,60 +3027,75 @@ static int debugfs_set_bist_v3_hw(struct hisi_hba *hisi_hba, bool enable)
|
|||
{
|
||||
u32 reg_val, mode_tmp;
|
||||
u32 linkrate = hisi_hba->debugfs_bist_linkrate;
|
||||
u32 phy_id = hisi_hba->debugfs_bist_phy_no;
|
||||
u32 phy_no = hisi_hba->debugfs_bist_phy_no;
|
||||
u32 *ffe = hisi_hba->debugfs_bist_ffe[phy_no];
|
||||
u32 code_mode = hisi_hba->debugfs_bist_code_mode;
|
||||
u32 path_mode = hisi_hba->debugfs_bist_mode;
|
||||
u32 *fix_code = &hisi_hba->debugfs_bist_fixed_code[0];
|
||||
struct device *dev = hisi_hba->dev;
|
||||
|
||||
dev_info(dev, "BIST info:linkrate=%d phy_id=%d code_mode=%d path_mode=%d\n",
|
||||
linkrate, phy_id, code_mode, path_mode);
|
||||
dev_info(dev, "BIST info:phy%d link_rate=%d code_mode=%d path_mode=%d ffe={0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x} fixed_code={0x%x, 0x%x}\n",
|
||||
phy_no, linkrate, code_mode, path_mode,
|
||||
ffe[FFE_SAS_1_5_GBPS], ffe[FFE_SAS_3_0_GBPS],
|
||||
ffe[FFE_SAS_6_0_GBPS], ffe[FFE_SAS_12_0_GBPS],
|
||||
ffe[FFE_SATA_1_5_GBPS], ffe[FFE_SATA_3_0_GBPS],
|
||||
ffe[FFE_SATA_6_0_GBPS], fix_code[FIXED_CODE],
|
||||
fix_code[FIXED_CODE_1]);
|
||||
mode_tmp = path_mode ? 2 : 1;
|
||||
if (enable) {
|
||||
/* some preparations before bist test */
|
||||
hisi_sas_bist_test_prep_v3_hw(hisi_hba);
|
||||
|
||||
/* set linkrate of bit test*/
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
PROG_PHY_LINK_RATE);
|
||||
reg_val &= ~CFG_PROG_PHY_LINK_RATE_MSK;
|
||||
reg_val |= (linkrate << CFG_PROG_PHY_LINK_RATE_OFF);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_id,
|
||||
PROG_PHY_LINK_RATE, reg_val);
|
||||
reg_val &= ~CFG_PROG_OOB_PHY_LINK_RATE_MSK;
|
||||
reg_val |= (linkrate << CFG_PROG_OOB_PHY_LINK_RATE_OFF);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, PROG_PHY_LINK_RATE,
|
||||
reg_val);
|
||||
|
||||
/* set code mode of bit test */
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_id,
|
||||
reg_val = hisi_sas_phy_read32(hisi_hba, phy_no,
|
||||
SAS_PHY_BIST_CTRL);
|
||||
reg_val &= ~(CFG_BIST_MODE_SEL_MSK |
|
||||
CFG_LOOP_TEST_MODE_MSK |
|
||||
CFG_RX_BIST_EN_MSK |
|
||||
CFG_TX_BIST_EN_MSK |
|
||||
CFG_BIST_TEST_MSK);
|
||||
reg_val &= ~(CFG_BIST_MODE_SEL_MSK | CFG_LOOP_TEST_MODE_MSK |
|
||||
CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK |
|
||||
CFG_BIST_TEST_MSK);
|
||||
reg_val |= ((code_mode << CFG_BIST_MODE_SEL_OFF) |
|
||||
(mode_tmp << CFG_LOOP_TEST_MODE_OFF) |
|
||||
CFG_BIST_TEST_MSK);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_id,
|
||||
SAS_PHY_BIST_CTRL, reg_val);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
|
||||
reg_val);
|
||||
|
||||
/* set the bist init value */
|
||||
hisi_sas_phy_write32(hisi_hba, phy_id,
|
||||
SAS_PHY_BIST_CODE,
|
||||
SAS_PHY_BIST_CODE_INIT);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_id,
|
||||
SAS_PHY_BIST_CODE1,
|
||||
SAS_PHY_BIST_CODE1_INIT);
|
||||
if (code_mode == HISI_SAS_BIST_CODE_MODE_FIXED_DATA) {
|
||||
reg_val = hisi_hba->debugfs_bist_fixed_code[0];
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
SAS_PHY_BIST_CODE, reg_val);
|
||||
|
||||
reg_val = hisi_hba->debugfs_bist_fixed_code[1];
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
SAS_PHY_BIST_CODE1, reg_val);
|
||||
} else {
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
SAS_PHY_BIST_CODE,
|
||||
SAS_PHY_BIST_CODE_INIT);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no,
|
||||
SAS_PHY_BIST_CODE1,
|
||||
SAS_PHY_BIST_CODE1_INIT);
|
||||
}
|
||||
|
||||
mdelay(100);
|
||||
reg_val |= (CFG_RX_BIST_EN_MSK | CFG_TX_BIST_EN_MSK);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_id,
|
||||
SAS_PHY_BIST_CTRL, reg_val);
|
||||
hisi_sas_phy_write32(hisi_hba, phy_no, SAS_PHY_BIST_CTRL,
|
||||
reg_val);
|
||||
|
||||
/* clear error bit */
|
||||
mdelay(100);
|
||||
hisi_sas_phy_read32(hisi_hba, phy_id, SAS_BIST_ERR_CNT);
|
||||
hisi_sas_phy_read32(hisi_hba, phy_no, SAS_BIST_ERR_CNT);
|
||||
} else {
|
||||
/* disable bist test and recover it */
|
||||
hisi_hba->debugfs_bist_cnt += hisi_sas_phy_read32(hisi_hba,
|
||||
phy_id, SAS_BIST_ERR_CNT);
|
||||
phy_no, SAS_BIST_ERR_CNT);
|
||||
hisi_sas_bist_test_restore_v3_hw(hisi_hba);
|
||||
}
|
||||
|
||||
|
@ -3056,7 +3118,7 @@ static struct scsi_host_template sht_v3_hw = {
|
|||
.queuecommand = sas_queuecommand,
|
||||
.dma_need_drain = ata_scsi_dma_need_drain,
|
||||
.target_alloc = sas_target_alloc,
|
||||
.slave_configure = hisi_sas_slave_configure,
|
||||
.slave_configure = slave_configure_v3_hw,
|
||||
.scan_finished = hisi_sas_scan_finished,
|
||||
.scan_start = hisi_sas_scan_start,
|
||||
.map_queues = hisi_sas_map_queues,
|
||||
|
@ -3266,6 +3328,17 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
|
||||
scsi_scan_host(shost);
|
||||
|
||||
/*
|
||||
* For the situation that there are ATA disks connected with SAS
|
||||
* controller, it additionally creates ata_port which will affect the
|
||||
* child_count of hisi_hba->dev. Even if suspended all the disks,
|
||||
* ata_port is still and the child_count of hisi_hba->dev is not 0.
|
||||
* So use pm_suspend_ignore_children() to ignore the effect to
|
||||
* hisi_hba->dev.
|
||||
*/
|
||||
pm_suspend_ignore_children(dev, true);
|
||||
pm_runtime_put_noidle(&pdev->dev);
|
||||
|
||||
return 0;
|
||||
|
||||
err_out_register_ha:
|
||||
|
@ -3305,6 +3378,7 @@ static void hisi_sas_v3_remove(struct pci_dev *pdev)
|
|||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
struct Scsi_Host *shost = sha->core.shost;
|
||||
|
||||
pm_runtime_get_noresume(dev);
|
||||
if (timer_pending(&hisi_hba->timer))
|
||||
del_timer(&hisi_hba->timer);
|
||||
|
||||
|
@ -3359,8 +3433,9 @@ enum {
|
|||
hip08,
|
||||
};
|
||||
|
||||
static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
static int _suspend_v3_hw(struct device *device)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(device);
|
||||
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
|
||||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
struct device *dev = hisi_hba->dev;
|
||||
|
@ -3391,7 +3466,7 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
|
|||
|
||||
hisi_sas_init_mem(hisi_hba);
|
||||
|
||||
device_state = pci_choose_state(pdev, state);
|
||||
device_state = pci_choose_state(pdev, PMSG_SUSPEND);
|
||||
dev_warn(dev, "entering operating state [D%d]\n",
|
||||
device_state);
|
||||
pci_save_state(pdev);
|
||||
|
@ -3404,8 +3479,9 @@ static int hisi_sas_v3_suspend(struct pci_dev *pdev, pm_message_t state)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int hisi_sas_v3_resume(struct pci_dev *pdev)
|
||||
static int _resume_v3_hw(struct device *device)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(device);
|
||||
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
|
||||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
struct Scsi_Host *shost = hisi_hba->shost;
|
||||
|
@ -3442,6 +3518,34 @@ static int hisi_sas_v3_resume(struct pci_dev *pdev)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int suspend_v3_hw(struct device *device)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(device);
|
||||
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
|
||||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
int rc;
|
||||
|
||||
set_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
|
||||
|
||||
rc = _suspend_v3_hw(device);
|
||||
if (rc)
|
||||
clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static int resume_v3_hw(struct device *device)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(device);
|
||||
struct sas_ha_struct *sha = pci_get_drvdata(pdev);
|
||||
struct hisi_hba *hisi_hba = sha->lldd_ha;
|
||||
int rc = _resume_v3_hw(device);
|
||||
|
||||
clear_bit(HISI_SAS_PM_BIT, &hisi_hba->flags);
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static const struct pci_device_id sas_v3_pci_table[] = {
|
||||
{ PCI_VDEVICE(HUAWEI, 0xa230), hip08 },
|
||||
{}
|
||||
|
@ -3453,14 +3557,29 @@ static const struct pci_error_handlers hisi_sas_err_handler = {
|
|||
.reset_done = hisi_sas_reset_done_v3_hw,
|
||||
};
|
||||
|
||||
static int runtime_suspend_v3_hw(struct device *dev)
|
||||
{
|
||||
return suspend_v3_hw(dev);
|
||||
}
|
||||
|
||||
static int runtime_resume_v3_hw(struct device *dev)
|
||||
{
|
||||
return resume_v3_hw(dev);
|
||||
}
|
||||
|
||||
static const struct dev_pm_ops hisi_sas_v3_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(suspend_v3_hw, resume_v3_hw)
|
||||
SET_RUNTIME_PM_OPS(runtime_suspend_v3_hw,
|
||||
runtime_resume_v3_hw, NULL)
|
||||
};
|
||||
|
||||
static struct pci_driver sas_v3_pci_driver = {
|
||||
.name = DRV_NAME,
|
||||
.id_table = sas_v3_pci_table,
|
||||
.probe = hisi_sas_v3_probe,
|
||||
.remove = hisi_sas_v3_remove,
|
||||
.suspend = hisi_sas_v3_suspend,
|
||||
.resume = hisi_sas_v3_resume,
|
||||
.err_handler = &hisi_sas_err_handler,
|
||||
.driver.pm = &hisi_sas_v3_pm_ops,
|
||||
};
|
||||
|
||||
module_pci_driver(sas_v3_pci_driver);
|
||||
|
|
|
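The hisi_sas v3 hunks above replace the legacy PCI bus .suspend/.resume hooks with a dev_pm_ops table wired through .driver.pm, so the same callbacks serve both system sleep and runtime PM. A minimal sketch of that wiring; the demo_* names are invented for illustration and the probe/remove plumbing is elided:

    #include <linux/module.h>
    #include <linux/pci.h>
    #include <linux/pm_runtime.h>

    static int demo_suspend(struct device *dev)
    {
            /* quiesce the controller and save state */
            return 0;
    }

    static int demo_resume(struct device *dev)
    {
            /* re-initialise the controller and restore state */
            return 0;
    }

    /* one table serves both system sleep and runtime PM */
    static const struct dev_pm_ops demo_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(demo_suspend, demo_resume)
            SET_RUNTIME_PM_OPS(demo_suspend, demo_resume, NULL)
    };

    static struct pci_driver demo_pci_driver = {
            .name      = "demo",
            /* .id_table, .probe and .remove elided */
            .driver.pm = &demo_pm_ops,
    };
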
@@ -1,5 +1,6 @@
 /*
 * Disk Array driver for HP Smart Array SAS controllers
+* Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 * Copyright 2016 Microsemi Corporation
 * Copyright 2014-2015 PMC-Sierra, Inc.
 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.

@@ -9329,10 +9330,10 @@ static int hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)
 static void hpsa_free_ioaccel1_cmd_and_bft(struct ctlr_info *h)
 {
 if (h->ioaccel_cmd_pool) {
-pci_free_consistent(h->pdev,
-h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
-h->ioaccel_cmd_pool,
-h->ioaccel_cmd_pool_dhandle);
+dma_free_coherent(&h->pdev->dev,
+h->nr_cmds * sizeof(*h->ioaccel_cmd_pool),
+h->ioaccel_cmd_pool,
+h->ioaccel_cmd_pool_dhandle);
 h->ioaccel_cmd_pool = NULL;
 h->ioaccel_cmd_pool_dhandle = 0;
 }

@@ -9382,10 +9383,10 @@ static void hpsa_free_ioaccel2_cmd_and_bft(struct ctlr_info *h)
 hpsa_free_ioaccel2_sg_chain_blocks(h);

 if (h->ioaccel2_cmd_pool) {
-pci_free_consistent(h->pdev,
-h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
-h->ioaccel2_cmd_pool,
-h->ioaccel2_cmd_pool_dhandle);
+dma_free_coherent(&h->pdev->dev,
+h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool),
+h->ioaccel2_cmd_pool,
+h->ioaccel2_cmd_pool_dhandle);
 h->ioaccel2_cmd_pool = NULL;
 h->ioaccel2_cmd_pool_dhandle = 0;
 }

@@ -1,5 +1,6 @@
 /*
 * Disk Array driver for HP Smart Array SAS controllers
+* Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 * Copyright 2016 Microsemi Corporation
 * Copyright 2014-2015 PMC-Sierra, Inc.
 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.

@@ -1,5 +1,6 @@
 /*
 * Disk Array driver for HP Smart Array SAS controllers
+* Copyright (c) 2019-2020 Microchip Technology Inc. and its subsidiaries
 * Copyright 2016 Microsemi Corporation
 * Copyright 2014-2015 PMC-Sierra, Inc.
 * Copyright 2000,2009-2015 Hewlett-Packard Development Company, L.P.

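The hpsa hunks above (and the megaraid conversion at the end of this series) move from the deprecated pci_*_consistent() wrappers to the generic DMA API. The calls map one-to-one; the only new element is an explicit GFP flag on allocation. A sketch of the correspondence, with demo_* names invented for illustration:

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static void *demo_alloc(struct pci_dev *pdev, size_t size, dma_addr_t *dma)
    {
            /* legacy equivalent: pci_alloc_consistent(pdev, size, dma) */
            return dma_alloc_coherent(&pdev->dev, size, dma, GFP_KERNEL);
    }

    static void demo_free(struct pci_dev *pdev, size_t size, void *cpu,
                          dma_addr_t dma)
    {
            /* legacy equivalent: pci_free_consistent(pdev, size, cpu, dma) */
            dma_free_coherent(&pdev->dev, size, cpu, dma);
    }
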
@@ -134,6 +134,7 @@ static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
 static void ibmvfc_npiv_logout(struct ibmvfc_host *);
 static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *);
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *);

 static const char *unknown_error = "unknown error";

@@ -431,7 +432,20 @@ static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
 }
 break;
 case IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT:
-if (action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+if (action == IBMVFC_TGT_ACTION_DEL_RPORT ||
+action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
 tgt->action = action;
 rc = 0;
 }
+break;
+case IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT:
+if (action == IBMVFC_TGT_ACTION_LOGOUT_RPORT) {
+tgt->action = action;
+rc = 0;
+}
+break;
+case IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT:
+if (action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+tgt->action = action;
+rc = 0;
+}

@@ -441,16 +455,18 @@ static int ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
 tgt->action = action;
 rc = 0;
 }
 break;
 case IBMVFC_TGT_ACTION_DELETED_RPORT:
 break;
 default:
-if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
-tgt->add_rport = 0;
 tgt->action = action;
 rc = 0;
 break;
 }

+if (action >= IBMVFC_TGT_ACTION_LOGOUT_RPORT)
+tgt->add_rport = 0;
+
 return rc;
 }

@@ -548,7 +564,8 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
 **/
 static void ibmvfc_reinit_host(struct ibmvfc_host *vhost)
 {
-if (vhost->action == IBMVFC_HOST_ACTION_NONE) {
+if (vhost->action == IBMVFC_HOST_ACTION_NONE &&
+vhost->state == IBMVFC_ACTIVE) {
 if (!ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
 scsi_block_requests(vhost->host);
 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);

@@ -2574,7 +2591,9 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)
 struct ibmvfc_host *vhost = shost_priv(shost);
 struct fc_rport *dev_rport;
 struct scsi_device *sdev;
-unsigned long rc;
+struct ibmvfc_target *tgt;
+unsigned long rc, flags;
+unsigned int found;

 ENTER;
 shost_for_each_device(sdev, shost) {

@@ -2588,6 +2607,27 @@ static void ibmvfc_terminate_rport_io(struct fc_rport *rport)

 if (rc == FAILED)
 ibmvfc_issue_fc_host_lip(shost);

+spin_lock_irqsave(shost->host_lock, flags);
+found = 0;
+list_for_each_entry(tgt, &vhost->targets, queue) {
+if (tgt->scsi_id == rport->port_id) {
+found++;
+break;
+}
+}
+
+if (found && tgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+/*
+ * If we get here, that means we previously attempted to send
+ * an implicit logout to the target but it failed, most likely
+ * due to I/O being pending, so we need to send it again
+ */
+ibmvfc_del_tgt(tgt);
+ibmvfc_reinit_host(vhost);
+}
+
+spin_unlock_irqrestore(shost->host_lock, flags);
 LEAVE;
 }

@@ -3623,7 +3663,18 @@ static void ibmvfc_tgt_implicit_logout_and_del_done(struct ibmvfc_event *evt)

 vhost->discovery_threads--;
 ibmvfc_free_event(evt);
-ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+/*
+ * If our state is IBMVFC_HOST_OFFLINE, we could be unloading the
+ * driver in which case we need to free up all the targets. If we are
+ * not unloading, we will still go through a hard reset to get out of
+ * offline state, so there is no need to track the old targets in that
+ * case.
+ */
+if (status == IBMVFC_MAD_SUCCESS || vhost->state == IBMVFC_HOST_OFFLINE)
+ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+else
+ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT);

 tgt_dbg(tgt, "Implicit Logout %s\n", (status == IBMVFC_MAD_SUCCESS) ? "succeeded" : "failed");
 kref_put(&tgt->kref, ibmvfc_release_tgt);

@@ -3661,6 +3712,94 @@ static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *tgt)
 tgt_dbg(tgt, "Sent Implicit Logout\n");
 }

+/**
+ * ibmvfc_tgt_move_login_done - Completion handler for Move Login
+ * @evt:	ibmvfc event struct
+ *
+ **/
+static void ibmvfc_tgt_move_login_done(struct ibmvfc_event *evt)
+{
+struct ibmvfc_target *tgt = evt->tgt;
+struct ibmvfc_host *vhost = evt->vhost;
+struct ibmvfc_move_login *rsp = &evt->xfer_iu->move_login;
+u32 status = be16_to_cpu(rsp->common.status);
+int level = IBMVFC_DEFAULT_LOG_LEVEL;
+
+vhost->discovery_threads--;
+ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+switch (status) {
+case IBMVFC_MAD_SUCCESS:
+tgt_dbg(tgt, "Move Login succeeded for old scsi_id: %llX\n", tgt->old_scsi_id);
+tgt->ids.node_name = wwn_to_u64(rsp->service_parms.node_name);
+tgt->ids.port_name = wwn_to_u64(rsp->service_parms.port_name);
+tgt->ids.port_id = tgt->scsi_id;
+memcpy(&tgt->service_parms, &rsp->service_parms,
+sizeof(tgt->service_parms));
+memcpy(&tgt->service_parms_change, &rsp->service_parms_change,
+sizeof(tgt->service_parms_change));
+ibmvfc_init_tgt(tgt, ibmvfc_tgt_send_prli);
+break;
+case IBMVFC_MAD_DRIVER_FAILED:
+break;
+case IBMVFC_MAD_CRQ_ERROR:
+ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+break;
+case IBMVFC_MAD_FAILED:
+default:
+level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_move_login);
+
+tgt_log(tgt, level,
+"Move Login failed: old scsi_id: %llX, flags:%x, vios_flags:%x, rc=0x%02X\n",
+tgt->old_scsi_id, be32_to_cpu(rsp->flags), be16_to_cpu(rsp->vios_flags),
+status);
+break;
+}
+
+kref_put(&tgt->kref, ibmvfc_release_tgt);
+ibmvfc_free_event(evt);
+wake_up(&vhost->work_wait_q);
+}
+
+
+/**
+ * ibmvfc_tgt_move_login - Initiate a move login for specified target
+ * @tgt:		ibmvfc target struct
+ *
+ **/
+static void ibmvfc_tgt_move_login(struct ibmvfc_target *tgt)
+{
+struct ibmvfc_host *vhost = tgt->vhost;
+struct ibmvfc_move_login *move;
+struct ibmvfc_event *evt;
+
+if (vhost->discovery_threads >= disc_threads)
+return;
+
+kref_get(&tgt->kref);
+evt = ibmvfc_get_event(vhost);
+vhost->discovery_threads++;
+ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_INIT_WAIT);
+ibmvfc_init_event(evt, ibmvfc_tgt_move_login_done, IBMVFC_MAD_FORMAT);
+evt->tgt = tgt;
+move = &evt->iu.move_login;
+memset(move, 0, sizeof(*move));
+move->common.version = cpu_to_be32(1);
+move->common.opcode = cpu_to_be32(IBMVFC_MOVE_LOGIN);
+move->common.length = cpu_to_be16(sizeof(*move));
+
+move->old_scsi_id = cpu_to_be64(tgt->old_scsi_id);
+move->new_scsi_id = cpu_to_be64(tgt->scsi_id);
+move->wwpn = cpu_to_be64(tgt->wwpn);
+move->node_name = cpu_to_be64(tgt->ids.node_name);
+
+if (ibmvfc_send_event(evt, vhost, default_timeout)) {
+vhost->discovery_threads--;
+ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+kref_put(&tgt->kref, ibmvfc_release_tgt);
+} else
+tgt_dbg(tgt, "Sent Move Login for old scsi_id: %llX\n", tgt->old_scsi_id);
+}
+
 /**
 * ibmvfc_adisc_needs_plogi - Does device need PLOGI?
 * @mad:	ibmvfc passthru mad struct

@@ -3979,31 +4118,77 @@ static void ibmvfc_tgt_query_target(struct ibmvfc_target *tgt)
 * Returns:
 *	0 on success / other on failure
 **/
-static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
+static int ibmvfc_alloc_target(struct ibmvfc_host *vhost,
+struct ibmvfc_discover_targets_entry *target)
 {
+struct ibmvfc_target *stgt = NULL;
+struct ibmvfc_target *wtgt = NULL;
 struct ibmvfc_target *tgt;
 unsigned long flags;
+u64 scsi_id = be32_to_cpu(target->scsi_id) & IBMVFC_DISC_TGT_SCSI_ID_MASK;
+u64 wwpn = be64_to_cpu(target->wwpn);

 /* Look to see if we already have a target allocated for this SCSI ID or WWPN */
 spin_lock_irqsave(vhost->host->host_lock, flags);
 list_for_each_entry(tgt, &vhost->targets, queue) {
-if (tgt->scsi_id == scsi_id) {
-if (tgt->need_login)
-ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
-goto unlock_out;
+if (tgt->wwpn == wwpn) {
+wtgt = tgt;
+break;
+}
+}
+
+list_for_each_entry(tgt, &vhost->targets, queue) {
+if (tgt->scsi_id == scsi_id) {
+stgt = tgt;
+break;
+}
+}
+
+if (wtgt && !stgt) {
+/*
+ * A WWPN target has moved and we still are tracking the old
+ * SCSI ID. The only way we should be able to get here is if
+ * we attempted to send an implicit logout for the old SCSI ID
+ * and it failed for some reason, such as there being I/O
+ * pending to the target. In this case, we will have already
+ * deleted the rport from the FC transport so we do a move
+ * login, which works even with I/O pending, as it will cancel
+ * any active commands.
+ */
+if (wtgt->action == IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT) {
+/*
+ * Do a move login here. The old target is no longer
+ * known to the transport layer We don't use the
+ * normal ibmvfc_set_tgt_action to set this, as we
+ * don't normally want to allow this state change.
+ */
+wtgt->old_scsi_id = wtgt->scsi_id;
+wtgt->scsi_id = scsi_id;
+wtgt->action = IBMVFC_TGT_ACTION_INIT;
+ibmvfc_init_tgt(wtgt, ibmvfc_tgt_move_login);
+goto unlock_out;
+} else {
+tgt_err(wtgt, "Unexpected target state: %d, %p\n",
+wtgt->action, wtgt->rport);
+}
+} else if (stgt) {
+if (tgt->need_login)
+ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
+goto unlock_out;
 }
 spin_unlock_irqrestore(vhost->host->host_lock, flags);

 tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
 memset(tgt, 0, sizeof(*tgt));
 tgt->scsi_id = scsi_id;
+tgt->wwpn = wwpn;
 tgt->vhost = vhost;
 tgt->need_login = 1;
-tgt->cancel_key = vhost->task_set++;
 timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0);
 kref_init(&tgt->kref);
 ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout);
 spin_lock_irqsave(vhost->host->host_lock, flags);
+tgt->cancel_key = vhost->task_set++;
 list_add_tail(&tgt->queue, &vhost->targets);

 unlock_out:

@@ -4023,9 +4208,7 @@ static int ibmvfc_alloc_targets(struct ibmvfc_host *vhost)
 int i, rc;

 for (i = 0, rc = 0; !rc && i < vhost->num_targets; i++)
-rc = ibmvfc_alloc_target(vhost,
-be32_to_cpu(vhost->disc_buf->scsi_id[i]) &
-IBMVFC_DISC_TGT_SCSI_ID_MASK);
+rc = ibmvfc_alloc_target(vhost, &vhost->disc_buf[i]);

 return rc;
 }

@@ -4085,6 +4268,7 @@ static void ibmvfc_discover_targets(struct ibmvfc_host *vhost)
 mad->bufflen = cpu_to_be32(vhost->disc_buf_sz);
 mad->buffer.va = cpu_to_be64(vhost->disc_buf_dma);
 mad->buffer.len = cpu_to_be32(vhost->disc_buf_sz);
+mad->flags = cpu_to_be32(IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST);
 ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT_WAIT);

 if (!ibmvfc_send_event(evt, vhost, default_timeout))

@@ -4420,6 +4604,13 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
 del_timer_sync(&tgt->timer);
 kref_put(&tgt->kref, ibmvfc_release_tgt);
 return;
+} else if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+tgt_dbg(tgt, "Deleting rport with outstanding I/O\n");
+ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+tgt->rport = NULL;
+spin_unlock_irqrestore(vhost->host->host_lock, flags);
+fc_remote_port_delete(rport);
+return;
 } else if (rport && tgt->action == IBMVFC_TGT_ACTION_DELETED_RPORT) {
 spin_unlock_irqrestore(vhost->host->host_lock, flags);
 return;

@@ -4543,6 +4734,15 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 del_timer_sync(&tgt->timer);
 kref_put(&tgt->kref, ibmvfc_release_tgt);
 return;
+} else if (tgt->action == IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT) {
+tgt_dbg(tgt, "Deleting rport with I/O outstanding\n");
+rport = tgt->rport;
+tgt->rport = NULL;
+ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT);
+spin_unlock_irqrestore(vhost->host->host_lock, flags);
+if (rport)
+fc_remote_port_delete(rport);
+return;
 }
 }

@@ -4775,7 +4975,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
 goto free_sg_pool;
 }

-vhost->disc_buf_sz = sizeof(vhost->disc_buf->scsi_id[0]) * max_targets;
+vhost->disc_buf_sz = sizeof(*vhost->disc_buf) * max_targets;
 vhost->disc_buf = dma_alloc_coherent(dev, vhost->disc_buf_sz,
 &vhost->disc_buf_dma, GFP_KERNEL);

@@ -4928,6 +5128,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 if (IS_ERR(vhost->work_thread)) {
 dev_err(dev, "Couldn't create kernel thread: %ld\n",
 PTR_ERR(vhost->work_thread));
+rc = PTR_ERR(vhost->work_thread);
 goto free_host_mem;
 }

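ibmvfc_set_tgt_action() above is a guarded state machine: a requested transition is applied only when the target's current state permits it, which is how the new DEL_AND_LOGOUT_RPORT and LOGOUT_DELETED_RPORT states stay ordered with respect to rport deletion. A compact user-space model of that shape — the states and rules here are invented for illustration, not the driver's exact table:

    #include <stdio.h>

    enum tgt_state { T_NONE, T_LOGOUT_WAIT, T_DEL_RPORT, T_LOGOUT_DELETED };

    /* apply "next" only if "cur" allows it; return 0 on success */
    static int set_state(enum tgt_state *cur, enum tgt_state next)
    {
            switch (*cur) {
            case T_LOGOUT_WAIT:
                    if (next != T_DEL_RPORT)
                            return -1;      /* transition refused */
                    break;
            case T_LOGOUT_DELETED:
                    if (next != T_DEL_RPORT && next != T_NONE)
                            return -1;
                    break;
            default:
                    break;                  /* unrestricted */
            }
            *cur = next;
            return 0;
    }

    int main(void)
    {
            enum tgt_state s = T_LOGOUT_WAIT;

            printf("%d\n", set_state(&s, T_LOGOUT_DELETED)); /* -1: refused */
            printf("%d\n", set_state(&s, T_DEL_RPORT));      /* 0: allowed */
            return 0;
    }
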
@@ -120,10 +120,14 @@ enum ibmvfc_mad_types {
 IBMVFC_PORT_LOGIN = 0x0004,
 IBMVFC_PROCESS_LOGIN = 0x0008,
 IBMVFC_QUERY_TARGET = 0x0010,
+IBMVFC_MOVE_LOGIN = 0x0020,
 IBMVFC_IMPLICIT_LOGOUT = 0x0040,
 IBMVFC_PASSTHRU = 0x0200,
 IBMVFC_TMF_MAD = 0x0100,
 IBMVFC_NPIV_LOGOUT = 0x0800,
+IBMVFC_CHANNEL_ENQUIRY = 0x1000,
+IBMVFC_CHANNEL_SETUP = 0x2000,
+IBMVFC_CONNECTION_INFO = 0x4000,
 };

@@ -133,16 +137,16 @@ struct ibmvfc_mad_common {
 __be16 status;
 __be16 length;
 __be64 tag;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_npiv_login_mad {
 struct ibmvfc_mad_common common;
 struct srp_direct_buf buffer;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_npiv_logout_mad {
 struct ibmvfc_mad_common common;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 #define IBMVFC_MAX_NAME 256

@@ -162,13 +166,15 @@ struct ibmvfc_npiv_login {
 __be32 max_cmds;
 __be64 capabilities;
 #define IBMVFC_CAN_MIGRATE 0x01
+#define IBMVFC_CAN_USE_CHANNELS 0x02
+#define IBMVFC_CAN_HANDLE_FPIN 0x04
 __be64 node_name;
 struct srp_direct_buf async;
 u8 partition_name[IBMVFC_MAX_NAME];
 u8 device_name[IBMVFC_MAX_NAME];
 u8 drc_name[IBMVFC_MAX_NAME];
 __be64 reserved2[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_common_svc_parms {
 __be16 fcph_version;

@@ -177,7 +183,7 @@ struct ibmvfc_common_svc_parms {
 __be16 bb_rcv_sz; /* upper nibble is BB_SC_N */
 __be32 ratov;
 __be32 edtov;
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);

 struct ibmvfc_service_parms {
 struct ibmvfc_common_svc_parms common;

@@ -192,7 +198,8 @@ struct ibmvfc_service_parms {
 __be32 ext_len;
 __be32 reserved[30];
 __be32 clk_sync_qos[2];
-}__attribute__((packed, aligned (4)));
+__be32 reserved2;
+} __packed __aligned(4);

 struct ibmvfc_npiv_login_resp {
 __be32 version;

@@ -204,6 +211,7 @@ struct ibmvfc_npiv_login_resp {
 __be64 capabilities;
 #define IBMVFC_CAN_FLUSH_ON_HALT 0x08
 #define IBMVFC_CAN_SUPPRESS_ABTS 0x10
+#define IBMVFC_CAN_SUPPORT_CHANNELS 0x20
 __be32 max_cmds;
 __be32 scsi_id_sz;
 __be64 max_dma_len;

@@ -217,29 +225,32 @@ struct ibmvfc_npiv_login_resp {
 u8 drc_name[IBMVFC_MAX_NAME];
 struct ibmvfc_service_parms service_parms;
 __be64 reserved2;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 union ibmvfc_npiv_login_data {
 struct ibmvfc_npiv_login login;
 struct ibmvfc_npiv_login_resp resp;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

-struct ibmvfc_discover_targets_buf {
-__be32 scsi_id[1];
+struct ibmvfc_discover_targets_entry {
+__be32 scsi_id;
+__be32 pad;
+__be64 wwpn;
 #define IBMVFC_DISC_TGT_SCSI_ID_MASK 0x00ffffff
-};
+} __packed __aligned(8);

 struct ibmvfc_discover_targets {
 struct ibmvfc_mad_common common;
 struct srp_direct_buf buffer;
 __be32 flags;
+#define IBMVFC_DISC_TGT_PORT_ID_WWPN_LIST 0x02
 __be16 status;
 __be16 error;
 __be32 bufflen;
 __be32 num_avail;
 __be32 num_written;
 __be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 enum ibmvfc_fc_reason {
 IBMVFC_INVALID_ELS_CMD_CODE = 0x01,

@@ -283,7 +294,27 @@ struct ibmvfc_port_login {
 struct ibmvfc_service_parms service_parms;
 struct ibmvfc_service_parms service_parms_change;
 __be64 reserved3[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
+
+struct ibmvfc_move_login {
+struct ibmvfc_mad_common common;
+__be64 old_scsi_id;
+__be64 new_scsi_id;
+__be64 wwpn;
+__be64 node_name;
+__be32 flags;
+#define IBMVFC_MOVE_LOGIN_IMPLICIT_OLD_FAILED 0x01
+#define IBMVFC_MOVE_LOGIN_IMPLICIT_NEW_FAILED 0x02
+#define IBMVFC_MOVE_LOGIN_PORT_LOGIN_FAILED 0x04
+__be32 reserved;
+struct ibmvfc_service_parms service_parms;
+struct ibmvfc_service_parms service_parms_change;
+__be32 reserved2;
+__be16 service_class;
+__be16 vios_flags;
+#define IBMVFC_MOVE_LOGIN_VF_NOT_SENT_ADAPTER 0x01
+__be64 reserved3;
+} __packed __aligned(8);

 struct ibmvfc_prli_svc_parms {
 u8 type;

@@ -303,7 +334,7 @@ struct ibmvfc_prli_svc_parms {
 #define IBMVFC_PRLI_TARGET_FUNC 0x00000010
 #define IBMVFC_PRLI_READ_FCP_XFER_RDY_DISABLED 0x00000002
 #define IBMVFC_PRLI_WR_FCP_XFER_RDY_DISABLED 0x00000001
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);

 struct ibmvfc_process_login {
 struct ibmvfc_mad_common common;

@@ -314,7 +345,7 @@ struct ibmvfc_process_login {
 __be16 error; /* also fc_reason */
 __be32 reserved2;
 __be64 reserved3[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_query_tgt {
 struct ibmvfc_mad_common common;

@@ -325,13 +356,13 @@ struct ibmvfc_query_tgt {
 __be16 fc_explain;
 __be16 fc_type;
 __be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_implicit_logout {
 struct ibmvfc_mad_common common;
 __be64 old_scsi_id;
 __be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_tmf {
 struct ibmvfc_mad_common common;

@@ -348,7 +379,7 @@ struct ibmvfc_tmf {
 __be32 my_cancel_key;
 __be32 pad;
 __be64 reserved[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 enum ibmvfc_fcp_rsp_info_codes {
 RSP_NO_FAILURE = 0x00,

@@ -361,7 +392,7 @@ struct ibmvfc_fcp_rsp_info {
 u8 reserved[3];
 u8 rsp_code;
 u8 reserved2[4];
-}__attribute__((packed, aligned (2)));
+} __packed __aligned(2);

 enum ibmvfc_fcp_rsp_flags {
 FCP_BIDI_RSP = 0x80,

@@ -377,7 +408,7 @@ enum ibmvfc_fcp_rsp_flags {
 union ibmvfc_fcp_rsp_data {
 struct ibmvfc_fcp_rsp_info info;
 u8 sense[SCSI_SENSE_BUFFERSIZE + sizeof(struct ibmvfc_fcp_rsp_info)];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_fcp_rsp {
 __be64 reserved;

@@ -388,7 +419,7 @@ struct ibmvfc_fcp_rsp {
 __be32 fcp_sense_len;
 __be32 fcp_rsp_len;
 union ibmvfc_fcp_rsp_data data;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 enum ibmvfc_cmd_flags {
 IBMVFC_SCATTERLIST = 0x0001,

@@ -422,7 +453,7 @@ struct ibmvfc_fcp_cmd_iu {
 #define IBMVFC_WRDATA 0x01
 u8 cdb[IBMVFC_MAX_CDB_LEN];
 __be32 xfer_len;
-}__attribute__((packed, aligned (4)));
+} __packed __aligned(4);

 struct ibmvfc_cmd {
 __be64 task_tag;

@@ -446,7 +477,7 @@ struct ibmvfc_cmd {
 __be64 reserved3[2];
 struct ibmvfc_fcp_cmd_iu iu;
 struct ibmvfc_fcp_rsp rsp;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_passthru_fc_iu {
 __be32 payload[7];

@@ -473,18 +504,64 @@ struct ibmvfc_passthru_iu {
 __be64 scsi_id;
 __be64 tag;
 __be64 reserved2[2];
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_passthru_mad {
 struct ibmvfc_mad_common common;
 struct srp_direct_buf cmd_ioba;
 struct ibmvfc_passthru_iu iu;
 struct ibmvfc_passthru_fc_iu fc_iu;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);
+
+struct ibmvfc_channel_enquiry {
+struct ibmvfc_mad_common common;
+__be32 flags;
+#define IBMVFC_NO_CHANNELS_TO_CRQ_SUPPORT 0x01
+#define IBMVFC_SUPPORT_VARIABLE_SUBQ_MSG 0x02
+#define IBMVFC_NO_N_TO_M_CHANNELS_SUPPORT 0x04
+__be32 num_scsi_subq_channels;
+__be32 num_nvmeof_subq_channels;
+__be32 num_scsi_vas_channels;
+__be32 num_nvmeof_vas_channels;
+} __packed __aligned(8);
+
+struct ibmvfc_channel_setup_mad {
+struct ibmvfc_mad_common common;
+struct srp_direct_buf buffer;
+} __packed __aligned(8);
+
+#define IBMVFC_MAX_CHANNELS 502
+
+struct ibmvfc_channel_setup {
+__be32 flags;
+#define IBMVFC_CANCEL_CHANNELS 0x01
+#define IBMVFC_USE_BUFFER 0x02
+#define IBMVFC_CHANNELS_CANCELED 0x04
+__be32 reserved;
+__be32 num_scsi_subq_channels;
+__be32 num_nvmeof_subq_channels;
+__be32 num_scsi_vas_channels;
+__be32 num_nvmeof_vas_channels;
+struct srp_direct_buf buffer;
+__be64 reserved2[5];
+__be64 channel_handles[IBMVFC_MAX_CHANNELS];
+} __packed __aligned(8);
+
+struct ibmvfc_connection_info {
+struct ibmvfc_mad_common common;
+__be64 information_bits;
+#define IBMVFC_NO_FC_IO_CHANNEL 0x01
+#define IBMVFC_NO_PHYP_VAS 0x02
+#define IBMVFC_NO_PHYP_SUBQ 0x04
+#define IBMVFC_PHYP_DEPRECATED_SUBQ 0x08
+#define IBMVFC_PHYP_PRESERVED_SUBQ 0x10
+#define IBMVFC_PHYP_FULL_SUBQ 0x20
+__be64 reserved[16];
+} __packed __aligned(8);

 struct ibmvfc_trace_start_entry {
 u32 xfer_len;
-}__attribute__((packed));
+} __packed;

 struct ibmvfc_trace_end_entry {
 u16 status;

@@ -493,7 +570,7 @@ struct ibmvfc_trace_end_entry {
 u8 rsp_code;
 u8 scsi_status;
 u8 reserved;
-}__attribute__((packed));
+} __packed;

 struct ibmvfc_trace_entry {
 struct ibmvfc_event *evt;

@@ -510,7 +587,7 @@ struct ibmvfc_trace_entry {
 struct ibmvfc_trace_start_entry start;
 struct ibmvfc_trace_end_entry end;
 } u;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 enum ibmvfc_crq_formats {
 IBMVFC_CMD_FORMAT = 0x01,

@@ -532,6 +609,7 @@ enum ibmvfc_async_event {
 IBMVFC_AE_HALT = 0x0400,
 IBMVFC_AE_RESUME = 0x0800,
 IBMVFC_AE_ADAPTER_FAILED = 0x1000,
+IBMVFC_AE_FPIN = 0x2000,
 };

 struct ibmvfc_async_desc {

@@ -545,7 +623,7 @@ struct ibmvfc_crq {
 volatile u8 format;
 u8 reserved[6];
 volatile __be64 ioba;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_crq_queue {
 struct ibmvfc_crq *msgs;

@@ -560,17 +638,25 @@ enum ibmvfc_ae_link_state {
 IBMVFC_AE_LS_LINK_DEAD = 0x08,
 };

+enum ibmvfc_ae_fpin_status {
+IBMVFC_AE_FPIN_LINK_CONGESTED = 0x1,
+IBMVFC_AE_FPIN_PORT_CONGESTED = 0x2,
+IBMVFC_AE_FPIN_PORT_CLEARED = 0x3,
+IBMVFC_AE_FPIN_PORT_DEGRADED = 0x4,
+};
+
 struct ibmvfc_async_crq {
 volatile u8 valid;
 u8 link_state;
-u8 pad[2];
+u8 fpin_status;
+u8 pad;
 __be32 pad2;
 volatile __be64 event;
 volatile __be64 scsi_id;
 volatile __be64 wwpn;
 volatile __be64 node_name;
 __be64 reserved;
-}__attribute__((packed, aligned (8)));
+} __packed __aligned(8);

 struct ibmvfc_async_crq_queue {
 struct ibmvfc_async_crq *msgs;

@@ -585,12 +671,16 @@ union ibmvfc_iu {
 struct ibmvfc_discover_targets discover_targets;
 struct ibmvfc_port_login plogi;
 struct ibmvfc_process_login prli;
+struct ibmvfc_move_login move_login;
 struct ibmvfc_query_tgt query_tgt;
 struct ibmvfc_implicit_logout implicit_logout;
 struct ibmvfc_tmf tmf;
 struct ibmvfc_cmd cmd;
 struct ibmvfc_passthru_mad passthru;
-}__attribute__((packed, aligned (8)));
+struct ibmvfc_channel_enquiry channel_enquiry;
+struct ibmvfc_channel_setup_mad channel_setup;
+struct ibmvfc_connection_info connection_info;
+} __packed __aligned(8);

 enum ibmvfc_target_action {
 IBMVFC_TGT_ACTION_NONE = 0,

@@ -600,12 +690,16 @@ enum ibmvfc_target_action {
 IBMVFC_TGT_ACTION_LOGOUT_RPORT_WAIT,
 IBMVFC_TGT_ACTION_DEL_RPORT,
 IBMVFC_TGT_ACTION_DELETED_RPORT,
+IBMVFC_TGT_ACTION_DEL_AND_LOGOUT_RPORT,
+IBMVFC_TGT_ACTION_LOGOUT_DELETED_RPORT,
 };

 struct ibmvfc_target {
 struct list_head queue;
 struct ibmvfc_host *vhost;
 u64 scsi_id;
+u64 wwpn;
+u64 old_scsi_id;
 struct fc_rport *rport;
 int target_id;
 enum ibmvfc_target_action action;

@@ -701,7 +795,7 @@ struct ibmvfc_host {
 dma_addr_t login_buf_dma;
 int disc_buf_sz;
 int log_level;
-struct ibmvfc_discover_targets_buf *disc_buf;
+struct ibmvfc_discover_targets_entry *disc_buf;
 struct mutex passthru_mutex;
 int task_set;
 int init_retries;

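The long run of ibmvfc.h hunks above swaps the spelled-out `__attribute__((packed, aligned (N)))` for the kernel's `__packed __aligned(N)` shorthand; the two forms produce identical layout. A small user-space demonstration with the macros stubbed locally:

    #include <stdint.h>
    #include <stdio.h>

    /* user-space stand-ins for the kernel's shorthand macros */
    #define __packed     __attribute__((packed))
    #define __aligned(x) __attribute__((aligned(x)))

    struct example {
            uint8_t  a;
            uint32_t b;     /* no padding before b because of packed */
    } __packed __aligned(8);

    int main(void)
    {
            /* members pack to 5 bytes; aligned(8) rounds the struct
             * size and alignment up to 8 */
            printf("size=%zu align=%zu\n", sizeof(struct example),
                   _Alignof(struct example));
            return 0;
    }
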
@@ -2671,7 +2671,6 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
 struct isci_request *ireq)
 {
 enum sci_status status;
-u16 index;

 switch (ihost->sm.current_state_id) {
 case SCIC_STOPPING:

@@ -2682,7 +2681,6 @@ enum sci_status sci_controller_complete_io(struct isci_host *ihost,
 if (status != SCI_SUCCESS)
 return status;

-index = ISCI_TAG_TCI(ireq->io_tag);
 clear_bit(IREQ_ACTIVE, &ireq->flags);
 return SCI_SUCCESS;
 default:

@@ -142,7 +142,7 @@ static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, c

 static DEVICE_ATTR(isci_id, S_IRUGO, isci_show_id, NULL);

-struct device_attribute *isci_host_attrs[] = {
+static struct device_attribute *isci_host_attrs[] = {
 &dev_attr_isci_id,
 NULL
 };

@@ -669,7 +669,7 @@ static const char *phy_event_name(u32 event_code)
 phy_state_name(state), phy_event_name(code), code)


-void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
+static void scu_link_layer_set_txcomsas_timeout(struct isci_phy *iphy, u32 timeout)
 {
 u32 val;

@@ -201,21 +201,9 @@ static struct platform_driver esp_jazz_driver = {
 .name = "jazz_esp",
 },
 };

-static int __init jazz_esp_init(void)
-{
-return platform_driver_register(&esp_jazz_driver);
-}
-
-static void __exit jazz_esp_exit(void)
-{
-platform_driver_unregister(&esp_jazz_driver);
-}
+module_platform_driver(esp_jazz_driver);

 MODULE_DESCRIPTION("JAZZ ESP SCSI driver");
 MODULE_AUTHOR("Thomas Bogendoerfer (tsbogend@alpha.franken.de)");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_VERSION);
-
-module_init(jazz_esp_init);
-module_exit(jazz_esp_exit);

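The jazz_esp hunk above (and the matching mac_esp hunk later in this series) deletes hand-written init/exit stubs because module_platform_driver() generates exactly that boilerplate. Roughly, the macro from <linux/platform_device.h> expands along these lines (a sketch, not the verbatim kernel definition):

    #define module_platform_driver(__driver)                        \
            module_driver(__driver, platform_driver_register,       \
                          platform_driver_unregister)

    /* module_driver() in turn emits:
     *
     *   static int __init __driver_init(void)
     *   {
     *           return platform_driver_register(&(__driver));
     *   }
     *   module_init(__driver_init);
     *
     * plus the matching __exit/module_exit() pair -- i.e. exactly
     * the functions these hunks remove.
     */
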
@@ -301,8 +301,8 @@ static void fc_disc_error(struct fc_disc *disc, struct fc_frame *fp)
 struct fc_lport *lport = fc_disc_lport(disc);
 unsigned long delay = 0;

-FC_DISC_DBG(disc, "Error %ld, retries %d/%d\n",
-PTR_ERR(fp), disc->retry_count,
+FC_DISC_DBG(disc, "Error %d, retries %d/%d\n",
+PTR_ERR_OR_ZERO(fp), disc->retry_count,
 FC_DISC_RETRY_LIMIT);

 if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {

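In the fc_disc hunk above, fp may be a real frame pointer rather than an encoded error, in which case PTR_ERR(fp) logs a meaningless value; PTR_ERR_OR_ZERO(fp) yields the errno for error pointers and 0 otherwise. A user-space model of the idiom — the kernel's real helpers live in <linux/err.h>, the ones below are local stand-ins:

    #include <stdio.h>

    #define MAX_ERRNO 4095

    /* an "error pointer" is an errno negated and cast to a pointer */
    static int is_err(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static long ptr_err_or_zero(const void *ptr)
    {
            return is_err(ptr) ? (long)ptr : 0;
    }

    int main(void)
    {
            int frame = 0;
            void *ok = &frame;              /* a valid object pointer */
            void *bad = (void *)(long)-22;  /* -EINVAL encoded in a pointer */

            printf("%ld %ld\n", ptr_err_or_zero(ok), ptr_err_or_zero(bad));
            /* prints: 0 -22 */
            return 0;
    }
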
@@ -726,19 +726,13 @@ void sas_resume_sata(struct asd_sas_port *port)
 */
 int sas_discover_sata(struct domain_device *dev)
 {
-int res;
-
 if (dev->dev_type == SAS_SATA_PM)
 return -ENODEV;

 dev->sata_dev.class = sas_get_ata_command_set(dev);
 sas_fill_in_rphy(dev, dev->rphy);

-res = sas_notify_lldd_dev_found(dev);
-if (res)
-return res;
-
-return 0;
+return sas_notify_lldd_dev_found(dev);
 }

 static void async_sas_ata_eh(void *data, async_cookie_t cookie)

@@ -278,13 +278,7 @@ static void sas_resume_devices(struct work_struct *work)
 */
 int sas_discover_end_dev(struct domain_device *dev)
 {
-int res;
-
-res = sas_notify_lldd_dev_found(dev);
-if (res)
-return res;
-
-return 0;
+return sas_notify_lldd_dev_found(dev);
 }

 /* ---------- Device registration and unregistration ---------- */

@@ -5338,8 +5338,7 @@ static ssize_t
 lpfc_fcp_cpu_map_store(struct device *dev, struct device_attribute *attr,
 const char *buf, size_t count)
 {
-int status = -EINVAL;
-return status;
+return -EINVAL;
 }

 /*

@@ -387,6 +387,8 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);

 if (rc == IOCB_ERROR) {
+geniocb->context_un.ndlp = NULL;
+lpfc_nlp_put(ndlp);
 lpfc_sli_release_iocbq(phba, geniocb);
 return 1;
 }

@@ -1696,7 +1696,6 @@ static int
 lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
 {
 struct lpfc_hba *phba = vport->phba;
-struct lpfc_sli4_hdw_queue *qp;
 struct lpfc_hdwq_stat *c_stat;
 int i, j, len;
 uint32_t tot_xmt;

@@ -1726,8 +1725,6 @@ lpfc_debugfs_hdwqstat_data(struct lpfc_vport *vport, char *buf, int size)
 goto buffer_done;

 for (i = 0; i < phba->cfg_hdw_queue; i++) {
-qp = &phba->sli4_hba.hdwq[i];
-
 tot_rcv = 0;
 tot_xmt = 0;
 tot_cmpl = 0;

@@ -5944,7 +5941,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
 phba, &lpfc_debugfs_op_lockstat);
 if (!phba->debug_lockstat) {
 lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT,
-"4610 Cant create debugfs lockstat\n");
+"4610 Can't create debugfs lockstat\n");
 goto debug_failed;
 }
 #endif

@@ -439,22 +439,10 @@ static struct platform_driver esp_mac_driver = {
 .name = DRV_MODULE_NAME,
 },
 };

-static int __init mac_esp_init(void)
-{
-return platform_driver_register(&esp_mac_driver);
-}
-
-static void __exit mac_esp_exit(void)
-{
-platform_driver_unregister(&esp_mac_driver);
-}
+module_platform_driver(esp_mac_driver);

 MODULE_DESCRIPTION("Mac ESP SCSI driver");
 MODULE_AUTHOR("Finn Thain");
 MODULE_LICENSE("GPL v2");
 MODULE_VERSION(DRV_VERSION);
 MODULE_ALIAS("platform:" DRV_MODULE_NAME);
-
-module_init(mac_esp_init);
-module_exit(mac_esp_exit);

@@ -133,8 +133,10 @@ mega_setup_mailbox(adapter_t *adapter)
 {
 unsigned long align;

-adapter->una_mbox64 = pci_alloc_consistent(adapter->dev,
-sizeof(mbox64_t), &adapter->una_mbox64_dma);
+adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
+sizeof(mbox64_t),
+&adapter->una_mbox64_dma,
+GFP_KERNEL);

 if( !adapter->una_mbox64 ) return -1;

@@ -222,8 +224,9 @@ mega_query_adapter(adapter_t *adapter)
 mraid_inquiry *inq;
 dma_addr_t dma_handle;

-ext_inq = pci_alloc_consistent(adapter->dev,
-sizeof(mraid_ext_inquiry), &dma_handle);
+ext_inq = dma_alloc_coherent(&adapter->dev->dev,
+sizeof(mraid_ext_inquiry),
+&dma_handle, GFP_KERNEL);

 if( ext_inq == NULL ) return -1;

@@ -243,8 +246,9 @@ mega_query_adapter(adapter_t *adapter)
 mega_8_to_40ld(inq, inquiry3,
 (mega_product_info *)&adapter->product_info);

-pci_free_consistent(adapter->dev, sizeof(mraid_ext_inquiry),
-ext_inq, dma_handle);
+dma_free_coherent(&adapter->dev->dev,
+sizeof(mraid_ext_inquiry), ext_inq,
+dma_handle);

 } else {		/*adapter supports 40ld */
 adapter->flag |= BOARD_40LD;

@@ -253,9 +257,10 @@ mega_query_adapter(adapter_t *adapter)
 * get product_info, which is static information and will be
 * unchanged
 */
-prod_info_dma_handle = pci_map_single(adapter->dev, (void *)
-&adapter->product_info,
-sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
+(void *)&adapter->product_info,
+sizeof(mega_product_info),
+DMA_FROM_DEVICE);

 mbox->m_out.xferaddr = prod_info_dma_handle;

@@ -267,8 +272,8 @@ mega_query_adapter(adapter_t *adapter)
 "Product_info cmd failed with error: %d\n",
 retval);

-pci_unmap_single(adapter->dev, prod_info_dma_handle,
-sizeof(mega_product_info), PCI_DMA_FROMDEVICE);
+dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
+sizeof(mega_product_info), DMA_FROM_DEVICE);
 }


@@ -645,7 +650,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
 scb->raw_mbox[3] = ldrv_num;

-scb->dma_direction = PCI_DMA_NONE;
+scb->dma_direction = DMA_NONE;

 return scb;
 #else

@@ -709,7 +714,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
 }

-scb->dma_direction = PCI_DMA_FROMDEVICE;
+scb->dma_direction = DMA_FROM_DEVICE;

 pthru->numsgelements = mega_build_sglist(adapter, scb,
 &pthru->dataxferaddr, &pthru->dataxferlen);

@@ -839,10 +844,10 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
 * If it is a read command
 */
 if( (*cmd->cmnd & 0x0F) == 0x08 ) {
-scb->dma_direction = PCI_DMA_FROMDEVICE;
+scb->dma_direction = DMA_FROM_DEVICE;
 }
 else {
-scb->dma_direction = PCI_DMA_TODEVICE;
+scb->dma_direction = DMA_TO_DEVICE;
 }

 /* Calculate Scatter-Gather info */

@@ -877,7 +882,7 @@ mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)

 scb->raw_mbox[3] = ldrv_num;

-scb->dma_direction = PCI_DMA_NONE;
+scb->dma_direction = DMA_NONE;

 return scb;
 #endif

@@ -971,7 +976,7 @@ mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);

 /* Not sure about the direction */
-scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+scb->dma_direction = DMA_BIDIRECTIONAL;

 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
 switch (cmd->cmnd[0]) {

@@ -1035,7 +1040,7 @@ mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);

 /* Not sure about the direction */
-scb->dma_direction = PCI_DMA_BIDIRECTIONAL;
+scb->dma_direction = DMA_BIDIRECTIONAL;

 switch(cmd->cmnd[0]) {
 case INQUIRY:

@@ -1813,25 +1818,25 @@ mega_free_sgl(adapter_t *adapter)
 scb = &adapter->scb_list[i];

 if( scb->sgl64 ) {
-pci_free_consistent(adapter->dev,
-sizeof(mega_sgl64) * adapter->sglen,
-scb->sgl64,
-scb->sgl_dma_addr);
+dma_free_coherent(&adapter->dev->dev,
+sizeof(mega_sgl64) * adapter->sglen,
+scb->sgl64, scb->sgl_dma_addr);

 scb->sgl64 = NULL;
 }

 if( scb->pthru ) {
-pci_free_consistent(adapter->dev, sizeof(mega_passthru),
-scb->pthru, scb->pthru_dma_addr);
+dma_free_coherent(&adapter->dev->dev,
+sizeof(mega_passthru), scb->pthru,
+scb->pthru_dma_addr);

 scb->pthru = NULL;
 }

 if( scb->epthru ) {
-pci_free_consistent(adapter->dev,
-sizeof(mega_ext_passthru),
-scb->epthru, scb->epthru_dma_addr);
+dma_free_coherent(&adapter->dev->dev,
+sizeof(mega_ext_passthru),
+scb->epthru, scb->epthru_dma_addr);

 scb->epthru = NULL;
 }

@@ -2004,7 +2009,7 @@ make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)

 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));

-if( pci_set_dma_mask(*pdev, DMA_BIT_MASK(32)) != 0 ) {
+if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
 kfree(*pdev);
 return -1;
 }

@@ -2028,14 +2033,16 @@ free_local_pdev(struct pci_dev *pdev)
 static inline void *
 mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
 {
-return pci_alloc_consistent(pdev, sizeof(mega_inquiry3), dma_handle);
+return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
+dma_handle, GFP_KERNEL);
 }


 static inline void
 mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
 {
-pci_free_consistent(pdev, sizeof(mega_inquiry3), inquiry, dma_handle);
+dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
+dma_handle);
 }


@@ -2349,7 +2356,8 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
 }


-scsi_inq = pci_alloc_consistent(pdev, 256, &scsi_inq_dma_handle);
+scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
+GFP_KERNEL);
 if( scsi_inq == NULL ) {
 seq_puts(m, "memory not available for scsi inq.\n");
 goto free_inquiry;

@@ -2422,7 +2430,7 @@ proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
 }

 free_pci:
-pci_free_consistent(pdev, 256, scsi_inq, scsi_inq_dma_handle);
+dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
 free_inquiry:
 mega_free_inquiry(inquiry, dma_handle, pdev);
 free_pdev:

@@ -2542,8 +2550,8 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
 raid_inq.logdrv_info.num_ldrv;
 }

-disk_array = pci_alloc_consistent(pdev, array_sz,
|
||||
&disk_array_dma_handle);
|
||||
disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
|
||||
&disk_array_dma_handle, GFP_KERNEL);
|
||||
|
||||
if( disk_array == NULL ) {
|
||||
seq_puts(m, "memory not available.\n");
|
||||
|
@ -2662,8 +2670,8 @@ proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
|
|||
}
|
||||
|
||||
free_pci:
|
||||
pci_free_consistent(pdev, array_sz, disk_array,
|
||||
disk_array_dma_handle);
|
||||
dma_free_coherent(&pdev->dev, array_sz, disk_array,
|
||||
disk_array_dma_handle);
|
||||
free_inquiry:
|
||||
mega_free_inquiry(inquiry, dma_handle, pdev);
|
||||
free_pdev:
|
||||
|
@ -2881,9 +2889,9 @@ mega_init_scb(adapter_t *adapter)
|
|||
|
||||
scb->idx = i;
|
||||
|
||||
scb->sgl64 = pci_alloc_consistent(adapter->dev,
|
||||
sizeof(mega_sgl64) * adapter->sglen,
|
||||
&scb->sgl_dma_addr);
|
||||
scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
|
||||
sizeof(mega_sgl64) * adapter->sglen,
|
||||
&scb->sgl_dma_addr, GFP_KERNEL);
|
||||
|
||||
scb->sgl = (mega_sglist *)scb->sgl64;
|
||||
|
||||
|
@ -2893,9 +2901,9 @@ mega_init_scb(adapter_t *adapter)
|
|||
return -1;
|
||||
}
|
||||
|
||||
scb->pthru = pci_alloc_consistent(adapter->dev,
|
||||
sizeof(mega_passthru),
|
||||
&scb->pthru_dma_addr);
|
||||
scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
|
||||
sizeof(mega_passthru),
|
||||
&scb->pthru_dma_addr, GFP_KERNEL);
|
||||
|
||||
if( !scb->pthru ) {
|
||||
dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
|
||||
|
@ -2903,9 +2911,9 @@ mega_init_scb(adapter_t *adapter)
|
|||
return -1;
|
||||
}
|
||||
|
||||
scb->epthru = pci_alloc_consistent(adapter->dev,
|
||||
sizeof(mega_ext_passthru),
|
||||
&scb->epthru_dma_addr);
|
||||
scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
|
||||
sizeof(mega_ext_passthru),
|
||||
&scb->epthru_dma_addr, GFP_KERNEL);
|
||||
|
||||
if( !scb->epthru ) {
|
||||
dev_warn(&adapter->dev->dev,
|
||||
|
@ -3145,9 +3153,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
|||
if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
|
||||
/* Passthru commands */
|
||||
|
||||
pthru = pci_alloc_consistent(pdev,
|
||||
sizeof(mega_passthru),
|
||||
&pthru_dma_hndl);
|
||||
pthru = dma_alloc_coherent(&pdev->dev,
|
||||
sizeof(mega_passthru),
|
||||
&pthru_dma_hndl, GFP_KERNEL);
|
||||
|
||||
if( pthru == NULL ) {
|
||||
free_local_pdev(pdev);
|
||||
|
@ -3165,9 +3173,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
|||
if( copy_from_user(pthru, upthru,
|
||||
sizeof(mega_passthru)) ) {
|
||||
|
||||
pci_free_consistent(pdev,
|
||||
sizeof(mega_passthru), pthru,
|
||||
pthru_dma_hndl);
|
||||
dma_free_coherent(&pdev->dev,
|
||||
sizeof(mega_passthru),
|
||||
pthru, pthru_dma_hndl);
|
||||
|
||||
free_local_pdev(pdev);
|
||||
|
||||
|
@ -3178,15 +3186,16 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
|||
* Is there a data transfer
|
||||
*/
|
||||
if( pthru->dataxferlen ) {
|
||||
data = pci_alloc_consistent(pdev,
|
||||
pthru->dataxferlen,
|
||||
&data_dma_hndl);
|
||||
data = dma_alloc_coherent(&pdev->dev,
|
||||
pthru->dataxferlen,
|
||||
&data_dma_hndl,
|
||||
GFP_KERNEL);
|
||||
|
||||
if( data == NULL ) {
|
||||
pci_free_consistent(pdev,
|
||||
sizeof(mega_passthru),
|
||||
pthru,
|
||||
pthru_dma_hndl);
|
||||
dma_free_coherent(&pdev->dev,
|
||||
sizeof(mega_passthru),
|
||||
pthru,
|
||||
pthru_dma_hndl);
|
||||
|
||||
free_local_pdev(pdev);
|
||||
|
||||
|
@ -3251,13 +3260,13 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
|||
|
||||
freemem_and_return:
|
||||
if( pthru->dataxferlen ) {
|
||||
pci_free_consistent(pdev,
|
||||
pthru->dataxferlen, data,
|
||||
data_dma_hndl);
|
||||
dma_free_coherent(&pdev->dev,
|
||||
pthru->dataxferlen, data,
|
||||
data_dma_hndl);
|
||||
}
|
||||
|
||||
pci_free_consistent(pdev, sizeof(mega_passthru),
|
||||
pthru, pthru_dma_hndl);
|
||||
dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
|
||||
pthru, pthru_dma_hndl);
|
||||
|
||||
free_local_pdev(pdev);
|
||||
|
||||
|
@ -3270,8 +3279,10 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
|||
* Is there a data transfer
|
||||
*/
|
||||
if( uioc.xferlen ) {
|
||||
data = pci_alloc_consistent(pdev,
|
||||
uioc.xferlen, &data_dma_hndl);
|
||||
data = dma_alloc_coherent(&pdev->dev,
|
||||
uioc.xferlen,
|
||||
&data_dma_hndl,
|
||||
GFP_KERNEL);
|
||||
|
||||
if( data == NULL ) {
|
||||
free_local_pdev(pdev);
|
||||
|
@ -3291,9 +3302,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
|||
if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
|
||||
uioc.xferlen) ) {
|
||||
|
||||
pci_free_consistent(pdev,
|
||||
uioc.xferlen,
|
||||
data, data_dma_hndl);
|
||||
dma_free_coherent(&pdev->dev,
|
||||
uioc.xferlen, data,
|
||||
data_dma_hndl);
|
||||
|
||||
free_local_pdev(pdev);
|
||||
|
||||
|
@ -3314,9 +3325,9 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
|||
|
||||
if( rval ) {
|
||||
if( uioc.xferlen ) {
|
||||
pci_free_consistent(pdev,
|
||||
uioc.xferlen, data,
|
||||
data_dma_hndl);
|
||||
dma_free_coherent(&pdev->dev,
|
||||
uioc.xferlen, data,
|
||||
data_dma_hndl);
|
||||
}
|
||||
|
||||
free_local_pdev(pdev);
|
||||
|
@ -3336,9 +3347,8 @@ megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
|||
}
|
||||
|
||||
if( uioc.xferlen ) {
|
||||
pci_free_consistent(pdev,
|
||||
uioc.xferlen, data,
|
||||
data_dma_hndl);
|
||||
dma_free_coherent(&pdev->dev, uioc.xferlen,
|
||||
data, data_dma_hndl);
|
||||
}
|
||||
|
||||
free_local_pdev(pdev);
|
||||
|
@ -4004,8 +4014,8 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
|
|||
*/
|
||||
if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
|
||||
|
||||
pthru = pci_alloc_consistent(pdev, sizeof(mega_passthru),
|
||||
&pthru_dma_handle);
|
||||
pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
|
||||
&pthru_dma_handle, GFP_KERNEL);
|
||||
|
||||
if( pthru == NULL ) {
|
||||
free_local_pdev(pdev);
|
||||
|
@ -4041,8 +4051,8 @@ mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
|
|||
|
||||
rval = mega_internal_command(adapter, &mc, pthru);
|
||||
|
||||
pci_free_consistent(pdev, sizeof(mega_passthru), pthru,
|
||||
pthru_dma_handle);
|
||||
dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
|
||||
pthru_dma_handle);
|
||||
|
||||
free_local_pdev(pdev);
|
||||
|
||||
|
@ -4267,8 +4277,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
/*
|
||||
* Allocate buffer to issue internal commands.
|
||||
*/
|
||||
adapter->mega_buffer = pci_alloc_consistent(adapter->dev,
|
||||
MEGA_BUFFER_SIZE, &adapter->buf_dma_handle);
|
||||
adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
|
||||
MEGA_BUFFER_SIZE,
|
||||
&adapter->buf_dma_handle,
|
||||
GFP_KERNEL);
|
||||
if (!adapter->mega_buffer) {
|
||||
dev_warn(&pdev->dev, "out of RAM\n");
|
||||
goto out_host_put;
|
||||
|
@ -4427,10 +4439,10 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
|
||||
/* Set the Mode of addressing to 64 bit if we can */
|
||||
if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
|
||||
pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
|
||||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
|
||||
adapter->has_64bit_addr = 1;
|
||||
} else {
|
||||
pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
|
||||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
|
||||
adapter->has_64bit_addr = 0;
|
||||
}
|
||||
|
||||
|
@ -4469,15 +4481,15 @@ megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
|
|||
return 0;
|
||||
|
||||
out_free_mbox:
|
||||
pci_free_consistent(adapter->dev, sizeof(mbox64_t),
|
||||
adapter->una_mbox64, adapter->una_mbox64_dma);
|
||||
dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
|
||||
adapter->una_mbox64, adapter->una_mbox64_dma);
|
||||
out_free_irq:
|
||||
free_irq(adapter->host->irq, adapter);
|
||||
out_free_scb_list:
|
||||
kfree(adapter->scb_list);
|
||||
out_free_cmd_buffer:
|
||||
pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
|
||||
adapter->mega_buffer, adapter->buf_dma_handle);
|
||||
dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
|
||||
adapter->mega_buffer, adapter->buf_dma_handle);
|
||||
out_host_put:
|
||||
scsi_host_put(host);
|
||||
out_iounmap:
|
||||
|
@ -4551,11 +4563,11 @@ megaraid_remove_one(struct pci_dev *pdev)
|
|||
sprintf(buf, "hba%d", adapter->host->host_no);
|
||||
remove_proc_subtree(buf, mega_proc_dir_entry);
|
||||
|
||||
pci_free_consistent(adapter->dev, MEGA_BUFFER_SIZE,
|
||||
adapter->mega_buffer, adapter->buf_dma_handle);
|
||||
dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
|
||||
adapter->mega_buffer, adapter->buf_dma_handle);
|
||||
kfree(adapter->scb_list);
|
||||
pci_free_consistent(adapter->dev, sizeof(mbox64_t),
|
||||
adapter->una_mbox64, adapter->una_mbox64_dma);
|
||||
dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
|
||||
adapter->una_mbox64, adapter->una_mbox64_dma);
|
||||
|
||||
scsi_host_put(host);
|
||||
pci_disable_device(pdev);
|
||||
|
|
|
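The megaraid hunks above (apparently drivers/scsi/megaraid.c) are a mechanical move from the deprecated PCI DMA wrappers to the generic DMA API: pci_alloc_consistent()/pci_free_consistent() become dma_alloc_coherent()/dma_free_coherent() against &pdev->dev, pci_map_single()/pci_unmap_single() become dma_map_single()/dma_unmap_single(), and the PCI_DMA_* direction flags become the enum dma_data_direction values. A minimal sketch of the pattern, with hypothetical buffer/length names rather than the exact megaraid code:

	#include <linux/dma-mapping.h>

	/* old: buf = pci_alloc_consistent(pdev, len, &dma_handle);
	 * (implicit GFP_ATOMIC, struct pci_dev argument)
	 */
	buf = dma_alloc_coherent(&pdev->dev, len, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... use the buffer ... */
	dma_free_coherent(&pdev->dev, len, buf, dma_handle);

Passing GFP_KERNEL is only valid because these call sites can sleep; a conversion on an atomic path would need GFP_ATOMIC to keep the old wrapper's behaviour.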
@@ -78,7 +78,7 @@ unsigned int resetwaittime = MEGASAS_RESET_WAIT_TIME;
 module_param(resetwaittime, int, 0444);
 MODULE_PARM_DESC(resetwaittime, "Wait time in (1-180s) after I/O timeout before resetting adapter. Default: 180s");

-int smp_affinity_enable = 1;
+static int smp_affinity_enable = 1;
 module_param(smp_affinity_enable, int, 0444);
 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
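The megaraid_sas change is a pure scoping fix: smp_affinity_enable is referenced only in this file, and module_param() works the same on a static variable, so nothing visible changes under /sys/module/megaraid_sas/parameters/. The general shape (illustrative knob name, not from the patch):

	static int my_knob = 1;			/* file-local */
	module_param(my_knob, int, 0444);	/* still exported to sysfs */
	MODULE_PARM_DESC(my_knob, "example knob (default 1)");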
@@ -129,8 +129,6 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc,
 static int
 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
 static void
-_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
-static void
 _base_clear_outstanding_commands(struct MPT3SAS_ADAPTER *ioc);

 /**
@@ -680,7 +678,7 @@ _base_fault_reset_work(struct work_struct *work)
 		ioc->shost_recovery = 1;
 		spin_unlock_irqrestore(
 		    &ioc->ioc_reset_in_progress_lock, flags);
-		_base_mask_interrupts(ioc);
+		mpt3sas_base_mask_interrupts(ioc);
 		_base_clear_outstanding_commands(ioc);
 	}
@@ -1466,13 +1464,13 @@ _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
 }

 /**
- * _base_mask_interrupts - disable interrupts
+ * mpt3sas_base_mask_interrupts - disable interrupts
  * @ioc: per adapter object
  *
  * Disabling ResetIRQ, Reply and Doorbell Interrupts
  */
-static void
-_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
 {
 	u32 him_register;
@@ -1484,13 +1482,13 @@ _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
 }

 /**
- * _base_unmask_interrupts - enable interrupts
+ * mpt3sas_base_unmask_interrupts - enable interrupts
  * @ioc: per adapter object
  *
  * Enabling only Reply Interrupts
  */
-static void
-_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
+void
+mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
 {
 	u32 him_register;
@@ -1628,7 +1626,7 @@ _base_process_reply_queue(struct adapter_reply_queue *reply_q)
 		 * So that FW can find enough entries to post the Reply
 		 * Descriptors in the reply descriptor post queue.
 		 */
-		if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
+		if (completed_cmds >= ioc->thresh_hold) {
 			if (ioc->combined_reply_queue) {
 				writel(reply_q->reply_post_host_index |
 				    ((msix_index & 7) <<
@@ -1787,12 +1785,14 @@ _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
 /**
  * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
  * @ioc: per adapter object
+ * @poll: poll over reply descriptor pools in case the interrupt for a
+ *	timed-out SCSI command got delayed
  * Context: non-ISR context
  *
  * Called when a Task Management request has completed.
  */
 void
-mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
+mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll)
 {
 	struct adapter_reply_queue *reply_q;
@@ -1809,19 +1809,25 @@ mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
 		/* TMs are on msix_index == 0 */
 		if (reply_q->msix_index == 0)
 			continue;
-		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
 		if (reply_q->irq_poll_scheduled) {
 			/* Calling irq_poll_disable will wait for any pending
 			 * callbacks to have completed.
 			 */
 			irq_poll_disable(&reply_q->irqpoll);
 			irq_poll_enable(&reply_q->irqpoll);
-			reply_q->irq_poll_scheduled = false;
-			reply_q->irq_line_enable = true;
-			enable_irq(reply_q->os_irq);
-			continue;
+			/* check how the scheduled poll has ended,
+			 * clean up only if necessary
+			 */
+			if (reply_q->irq_poll_scheduled) {
+				reply_q->irq_poll_scheduled = false;
+				reply_q->irq_line_enable = true;
+				enable_irq(reply_q->os_irq);
+			}
 		}
+		synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
 	}
+	if (poll)
+		_base_process_reply_queue(reply_q);
 }

 /**
@@ -3372,7 +3378,7 @@ mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
 		goto out_fail;
 	}

-	_base_mask_interrupts(ioc);
+	mpt3sas_base_mask_interrupts(ioc);

 	r = _base_get_ioc_facts(ioc);
 	if (r) {
@@ -5257,7 +5263,6 @@ _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 			_base_release_memory_pools(ioc);
 			goto retry_allocation;
 		}
-		memset(ioc->request, 0, sz);

 		if (retry_sz)
 			ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
@@ -5618,6 +5623,23 @@ _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
 	return current_state;
 }

+/**
+ * _base_dump_reg_set - This function will print hexdump of register set.
+ * @ioc: per adapter object
+ *
+ * Returns nothing.
+ */
+static inline void
+_base_dump_reg_set(struct MPT3SAS_ADAPTER *ioc)
+{
+	unsigned int i, sz = 256;
+	u32 __iomem *reg = (u32 __iomem *)ioc->chip;
+
+	ioc_info(ioc, "System Register set:\n");
+	for (i = 0; i < (sz / sizeof(u32)); i++)
+		pr_info("%08x: %08x\n", (i * 4), readl(&reg[i]));
+}
+
 /**
  * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
  * a write to the doorbell)
@@ -6797,6 +6819,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 		if (count++ > 20) {
 			ioc_info(ioc,
 			    "Stop writing magic sequence after 20 retries\n");
+			_base_dump_reg_set(ioc);
 			goto out;
 		}
@@ -6825,6 +6848,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 		if (host_diagnostic == 0xFFFFFFFF) {
 			ioc_info(ioc,
 			    "Invalid host diagnostic register value\n");
+			_base_dump_reg_set(ioc);
 			goto out;
 		}
 		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
@@ -6859,6 +6883,7 @@ _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
 	if (ioc_state) {
 		ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
 			__func__, ioc_state);
+		_base_dump_reg_set(ioc);
 		goto out;
 	}
@@ -7101,7 +7126,7 @@ _base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)

 skip_init_reply_post_host_index:

-	_base_unmask_interrupts(ioc);
+	mpt3sas_base_unmask_interrupts(ioc);

 	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
 		r = _base_display_fwpkg_version(ioc);
@@ -7150,7 +7175,7 @@ mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
 	/* synchronizing freeing resource with pci_access_mutex lock */
 	mutex_lock(&ioc->pci_access_mutex);
 	if (ioc->chip_phys && ioc->chip) {
-		_base_mask_interrupts(ioc);
+		mpt3sas_base_mask_interrupts(ioc);
 		ioc->shost_recovery = 1;
 		_base_make_ioc_ready(ioc, SOFT_RESET);
 		ioc->shost_recovery = 0;
@@ -7716,7 +7741,7 @@ mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
 	}
 	_base_pre_reset_handler(ioc);
 	mpt3sas_wait_for_commands_to_complete(ioc);
-	_base_mask_interrupts(ioc);
+	mpt3sas_base_mask_interrupts(ioc);
 	r = _base_make_ioc_ready(ioc, type);
 	if (r)
 		goto out;
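Two threads run through the mpt3sas_base.c hunks: the interrupt mask/unmask helpers are promoted from file-local _base_* statics to exported mpt3sas_base_* functions so the scsih layer can call them, and mpt3sas_base_sync_reply_irqs() gains a poll argument that, after quiescing irq_poll and synchronizing the MSI-X vectors, makes one explicit pass over the reply descriptor queues. A condensed sketch of how a caller drains possibly-late completions (assuming non-atomic context and an adapter that is not mid-recovery):

	mpt3sas_base_mask_interrupts(ioc);	/* stop new hard IRQs */
	mpt3sas_base_sync_reply_irqs(ioc, 1);	/* flush irq_poll, then poll queues */
	mpt3sas_base_unmask_interrupts(ioc);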
@@ -76,8 +76,8 @@
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR	"Avago Technologies <MPT-FusionLinux.pdl@avagotech.com>"
 #define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION		"34.100.00.00"
-#define MPT3SAS_MAJOR_VERSION		34
+#define MPT3SAS_DRIVER_VERSION		"35.100.00.00"
+#define MPT3SAS_MAJOR_VERSION		35
 #define MPT3SAS_MINOR_VERSION		100
 #define MPT3SAS_BUILD_VERSION		0
 #define MPT3SAS_RELEASE_VERSION	00
@@ -1036,6 +1036,8 @@ typedef void (*MPT3SAS_FLUSH_RUNNING_CMDS)(struct MPT3SAS_ADAPTER *ioc);
 * @firmware_event_thread: ""
 * @fw_event_lock:
 * @fw_event_list: list of fw events
+ * @current_event: the firmware event currently being processed
+ * @fw_events_cleanup: set to one while cleaning up the fw events
 * @aen_event_read_flag: event log was read
 * @broadcast_aen_busy: broadcast aen waiting to be serviced
 * @shost_recovery: host reset in progress
@@ -1217,6 +1219,8 @@ struct MPT3SAS_ADAPTER {
 	struct workqueue_struct	*firmware_event_thread;
 	spinlock_t		fw_event_lock;
 	struct list_head	fw_event_list;
+	struct fw_event_work	*current_event;
+	u8			fw_events_cleanup;

 	/* misc flags */
 	int			aen_event_read_flag;
@@ -1524,7 +1528,9 @@ __le32 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc,
 	u16 smid);
 void *mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid);
 dma_addr_t mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid);
-void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc, u8 poll);
+void mpt3sas_base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc);
+void mpt3sas_base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc);

 void mpt3sas_base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
 	u16 handle);
@@ -1604,11 +1610,12 @@ void mpt3sas_scsih_clear_outstanding_scsi_tm_commands(
 	struct MPT3SAS_ADAPTER *ioc);
 void mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc);

-int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
-	u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method);
+int mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+	u16 msix_task, u8 timeout, u8 tr_method);
 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
-	u64 lun, u8 type, u16 smid_task, u16 msix_task,
-	u8 timeout, u8 tr_method);
+	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+	u16 msix_task, u8 timeout, u8 tr_method);

 void mpt3sas_scsih_set_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
 void mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle);
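With the header change, every task-management caller must now pass the OS-assigned channel and id in addition to the LUN, so the TM code can identify the exact nexus it is acting on. Following the new prototype, a call now looks like:

	r = mpt3sas_scsih_issue_locked_tm(ioc, handle,
			scmd->device->channel, scmd->device->id,
			scmd->device->lun,
			MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
			st->smid, st->msix_io, 30, 0);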
@@ -371,7 +371,7 @@ _config_request(struct MPT3SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
 	}

 	r = 0;
-	memset(mpi_reply, 0, sizeof(Mpi2ConfigReply_t));
+	memset(ioc->config_cmds.reply, 0, sizeof(Mpi2ConfigReply_t));
 	ioc->config_cmds.status = MPT3_CMD_PENDING;
 	config_request = mpt3sas_base_get_msg_frame(ioc, smid);
 	ioc->config_cmds.smid = smid;
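The one-line _config_request() fix matters because the completion path copies firmware's reply into the driver-owned ioc->config_cmds.reply buffer, and that is the buffer inspected afterwards; clearing only the caller's mpi_reply copy leaves stale data from a previous request in the buffer that actually gets read. (This is a reading of the hunk alone, not of the full function.) The line the patch keeps:

	/* clear the buffer the completion handler fills in, not the local copy */
	memset(ioc->config_cmds.reply, 0, sizeof(Mpi2ConfigReply_t));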
@@ -1109,13 +1109,15 @@ _ctl_do_mpt_command(struct MPT3SAS_ADAPTER *ioc, struct mpt3_ioctl_command karg,
 			    pcie_device->device_info))))
 				mpt3sas_scsih_issue_locked_tm(ioc,
 				  le16_to_cpu(mpi_request->FunctionDependent1),
-				  0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+				  0, 0, 0,
+				  MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
 				  0, pcie_device->reset_timeout,
 				  MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE);
 			else
 				mpt3sas_scsih_issue_locked_tm(ioc,
 				  le16_to_cpu(mpi_request->FunctionDependent1),
-				  0, MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
+				  0, 0, 0,
+				  MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0,
 				  0, 30, MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET);
 		} else
 			mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
@@ -3384,12 +3386,10 @@ host_trace_buffer_enable_store(struct device *cdev,
 		    &&
 		    (ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
 		    MPT3_DIAG_BUFFER_IS_APP_OWNED)) {
-			pci_free_consistent(ioc->pdev,
-			    ioc->diag_buffer_sz[
-			    MPI2_DIAG_BUF_TYPE_TRACE],
-			    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
-			    ioc->diag_buffer_dma[
-			    MPI2_DIAG_BUF_TYPE_TRACE]);
+			dma_free_coherent(&ioc->pdev->dev,
+			    ioc->diag_buffer_sz[MPI2_DIAG_BUF_TYPE_TRACE],
+			    ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE],
+			    ioc->diag_buffer_dma[MPI2_DIAG_BUF_TYPE_TRACE]);
 			ioc->diag_buffer[MPI2_DIAG_BUF_TYPE_TRACE] =
 			    NULL;
 		}
@@ -1512,6 +1512,66 @@ _scsih_is_nvme_pciescsi_device(u32 device_info)
 	return 0;
 }

+/**
+ * _scsih_scsi_lookup_find_by_target - search for matching channel:id
+ * @ioc: per adapter object
+ * @id: target id
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_target(struct MPT3SAS_ADAPTER *ioc, int id,
+	int channel)
+{
+	int smid;
+	struct scsi_cmnd *scmd;
+
+	for (smid = 1;
+	     smid <= ioc->shost->can_queue; smid++) {
+		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+		if (!scmd)
+			continue;
+		if (scmd->device->id == id &&
+		    scmd->device->channel == channel)
+			return 1;
+	}
+	return 0;
+}
+
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT3SAS_ADAPTER *ioc, int id,
+	unsigned int lun, int channel)
+{
+	int smid;
+	struct scsi_cmnd *scmd;
+
+	for (smid = 1; smid <= ioc->shost->can_queue; smid++) {
+
+		scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
+		if (!scmd)
+			continue;
+		if (scmd->device->id == id &&
+		    scmd->device->channel == channel &&
+		    scmd->device->lun == lun)
+			return 1;
+	}
+	return 0;
+}
+
 /**
  * mpt3sas_scsih_scsi_lookup_get - returns scmd entry
  * @ioc: per adapter object
@@ -2700,10 +2760,102 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 	}
 }

+/**
+ * scsih_tm_cmd_map_status - map the target reset & LUN reset TM status
+ * @ioc: per adapter object
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Look whether TM has aborted the timed out SCSI command; if
+ * TM has aborted the IO then return SUCCESS, else return FAILED.
+ */
+static int
+scsih_tm_cmd_map_status(struct MPT3SAS_ADAPTER *ioc, uint channel,
+	uint id, uint lun, u8 type, u16 smid_task)
+{
+
+	if (smid_task <= ioc->shost->can_queue) {
+		switch (type) {
+		case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+			if (!(_scsih_scsi_lookup_find_by_target(ioc,
+			    id, channel)))
+				return SUCCESS;
+			break;
+		case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+		case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+			if (!(_scsih_scsi_lookup_find_by_lun(ioc, id,
+			    lun, channel)))
+				return SUCCESS;
+			break;
+		default:
+			return SUCCESS;
+		}
+	} else if (smid_task == ioc->scsih_cmds.smid) {
+		if ((ioc->scsih_cmds.status & MPT3_CMD_COMPLETE) ||
+		    (ioc->scsih_cmds.status & MPT3_CMD_NOT_USED))
+			return SUCCESS;
+	} else if (smid_task == ioc->ctl_cmds.smid) {
+		if ((ioc->ctl_cmds.status & MPT3_CMD_COMPLETE) ||
+		    (ioc->ctl_cmds.status & MPT3_CMD_NOT_USED))
+			return SUCCESS;
+	}
+
+	return FAILED;
+}
+
+/**
+ * scsih_tm_post_processing - post processing of target & LUN reset
+ * @ioc: per adapter object
+ * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
+ * @lun: lun number
+ * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
+ * @smid_task: smid assigned to the task
+ *
+ * Post processing of target & LUN reset. Due to interrupt latency
+ * issues it is possible that the interrupt for the aborted IO has not
+ * been received yet. So before returning failure status, poll the
+ * reply descriptor pools for the reply of the timed out SCSI command.
+ * Return FAILED status if the reply for the timed out command is not
+ * received, otherwise return SUCCESS.
+ */
+static int
+scsih_tm_post_processing(struct MPT3SAS_ADAPTER *ioc, u16 handle,
+	uint channel, uint id, uint lun, u8 type, u16 smid_task)
+{
+	int rc;
+
+	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+	if (rc == SUCCESS)
+		return rc;
+
+	ioc_info(ioc,
+	    "Poll ReplyDescriptor queues for completion of"
+	    " smid(%d), task_type(0x%02x), handle(0x%04x)\n",
+	    smid_task, type, handle);
+
+	/*
+	 * Due to interrupt latency issues, the driver may receive the
+	 * interrupt for the TM first and only then the one for the aborted
+	 * SCSI IO command. So, poll all the ReplyDescriptor pools before
+	 * returning the FAILED status to SML.
+	 */
+	mpt3sas_base_mask_interrupts(ioc);
+	mpt3sas_base_sync_reply_irqs(ioc, 1);
+	mpt3sas_base_unmask_interrupts(ioc);
+
+	return scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
+}
+
 /**
  * mpt3sas_scsih_issue_tm - main routine for sending tm requests
  * @ioc: per adapter struct
  * @handle: device handle
+ * @channel: the channel assigned by the OS
+ * @id: the id assigned by the OS
  * @lun: lun number
  * @type: MPI2_SCSITASKMGMT_TASKTYPE__XXX (defined in mpi2_init.h)
  * @smid_task: smid assigned to the task
@@ -2720,11 +2872,13 @@ mpt3sas_scsih_clear_tm_flag(struct MPT3SAS_ADAPTER *ioc, u16 handle)
  * Return: SUCCESS or FAILED.
  */
 int
-mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
-	u8 type, u16 smid_task, u16 msix_task, u8 timeout, u8 tr_method)
+mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, uint channel,
+	uint id, u64 lun, u8 type, u16 smid_task, u16 msix_task,
+	u8 timeout, u8 tr_method)
 {
 	Mpi2SCSITaskManagementRequest_t *mpi_request;
 	Mpi2SCSITaskManagementReply_t *mpi_reply;
+	Mpi25SCSIIORequest_t *request;
 	u16 smid = 0;
 	u32 ioc_state;
 	int rc;
@@ -2780,7 +2934,9 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
 	mpi_request->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
 	mpi_request->DevHandle = cpu_to_le16(handle);
 	mpi_request->TaskType = type;
-	mpi_request->MsgFlags = tr_method;
+	if (type == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+	    type == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+		mpi_request->MsgFlags = tr_method;
 	mpi_request->TaskMID = cpu_to_le16(smid_task);
 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
 	mpt3sas_scsih_set_tm_flag(ioc, handle);
@@ -2800,7 +2956,7 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
 	}

 	/* sync IRQs in case those were busy during flush. */
-	mpt3sas_base_sync_reply_irqs(ioc);
+	mpt3sas_base_sync_reply_irqs(ioc, 0);

 	if (ioc->tm_cmds.status & MPT3_CMD_REPLY_VALID) {
 		mpt3sas_trigger_master(ioc, MASTER_TRIGGER_TASK_MANAGMENT);
@@ -2817,7 +2973,44 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
 			    sizeof(Mpi2SCSITaskManagementRequest_t)/4);
 		}
 	}
-	rc = SUCCESS;
+
+	switch (type) {
+	case MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
+		rc = SUCCESS;
+		/*
+		 * If the DevHandle field in smid_task's entry of the request
+		 * pool doesn't match the device handle on which this task
+		 * abort TM was issued, then the TM has successfully aborted
+		 * the timed out command: smid_task's entry in the request
+		 * pool is memset to zero once the timed out command is
+		 * returned to the SML. If the command was not aborted, then
+		 * smid_task's entry won't be cleared and will still hold the
+		 * same DevHandle on which this task abort TM was issued, and
+		 * the driver will return the TM status as FAILED.
+		 */
+		request = mpt3sas_base_get_msg_frame(ioc, smid_task);
+		if (le16_to_cpu(request->DevHandle) != handle)
+			break;
+
+		ioc_info(ioc, "Task abort tm failed: handle(0x%04x),"
+		    "timeout(%d) tr_method(0x%x) smid(%d) msix_index(%d)\n",
+		    handle, timeout, tr_method, smid_task, msix_task);
+		rc = FAILED;
+		break;
+
+	case MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
+	case MPI2_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET:
+	case MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET:
+		rc = scsih_tm_post_processing(ioc, handle, channel, id, lun,
+		    type, smid_task);
+		break;
+	case MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK:
+		rc = SUCCESS;
+		break;
+	default:
+		rc = FAILED;
+		break;
+	}

 out:
 	mpt3sas_scsih_clear_tm_flag(ioc, handle);
@@ -2826,14 +3019,14 @@ mpt3sas_scsih_issue_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle, u64 lun,
 }

 int mpt3sas_scsih_issue_locked_tm(struct MPT3SAS_ADAPTER *ioc, u16 handle,
-	u64 lun, u8 type, u16 smid_task, u16 msix_task,
-	u8 timeout, u8 tr_method)
+	uint channel, uint id, u64 lun, u8 type, u16 smid_task,
+	u16 msix_task, u8 timeout, u8 tr_method)
 {
 	int ret;

 	mutex_lock(&ioc->tm_cmds.mutex);
-	ret = mpt3sas_scsih_issue_tm(ioc, handle, lun, type, smid_task,
-	    msix_task, timeout, tr_method);
+	ret = mpt3sas_scsih_issue_tm(ioc, handle, channel, id, lun, type,
+	    smid_task, msix_task, timeout, tr_method);
 	mutex_unlock(&ioc->tm_cmds.mutex);

 	return ret;
@@ -2980,7 +3173,8 @@ scsih_abort(struct scsi_cmnd *scmd)
 	if (pcie_device && (!ioc->tm_custom_handling) &&
 	    (!(mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))))
 		timeout = ioc->nvme_abort_timeout;
-	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+	    scmd->device->id, scmd->device->lun,
 	    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 	    st->smid, st->msix_io, timeout, 0);
 	/* Command must be cleared after abort */
@@ -3056,7 +3250,8 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
 	} else
 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;

-	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->lun,
+	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+	    scmd->device->id, scmd->device->lun,
 	    MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, 0, 0,
 	    tr_timeout, tr_method);
 	/* Check for busy commands after reset */
@@ -3134,7 +3329,8 @@ scsih_target_reset(struct scsi_cmnd *scmd)
 		tr_method = MPI26_SCSITASKMGMT_MSGFLAGS_PROTOCOL_LVL_RST_PCIE;
 	} else
 		tr_method = MPI2_SCSITASKMGMT_MSGFLAGS_LINK_RESET;
-	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, 0,
+	r = mpt3sas_scsih_issue_locked_tm(ioc, handle, scmd->device->channel,
+	    scmd->device->id, 0,
 	    MPI2_SCSITASKMGMT_TASKTYPE_TARGET_RESET, 0, 0,
 	    tr_timeout, tr_method);
 	/* Check for busy commands after reset */
@@ -3323,11 +3519,13 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)
 {
 	struct fw_event_work *fw_event;

-	if (list_empty(&ioc->fw_event_list) ||
+	if ((list_empty(&ioc->fw_event_list) && !ioc->current_event) ||
 	    !ioc->firmware_event_thread || in_interrupt())
 		return;

-	while ((fw_event = dequeue_next_fw_event(ioc))) {
+	ioc->fw_events_cleanup = 1;
+	while ((fw_event = dequeue_next_fw_event(ioc)) ||
+	    (fw_event = ioc->current_event)) {
 		/*
 		 * Wait on the fw_event to complete. If this returns 1, then
 		 * the event was never executed, and we need a put for the
@@ -3341,6 +3539,7 @@ _scsih_fw_event_cleanup_queue(struct MPT3SAS_ADAPTER *ioc)

 		fw_event_work_put(fw_event);
 	}
+	ioc->fw_events_cleanup = 0;
 }

 /**
@@ -7527,7 +7726,7 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 			goto out;

 		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
-		r = mpt3sas_scsih_issue_tm(ioc, handle, lun,
+		r = mpt3sas_scsih_issue_tm(ioc, handle, 0, 0, lun,
 		    MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK, st->smid,
 		    st->msix_io, 30, 0);
 		if (r == FAILED) {
@@ -7568,9 +7767,9 @@ _scsih_sas_broadcast_primitive_event(struct MPT3SAS_ADAPTER *ioc,
 		if (ioc->shost_recovery)
 			goto out_no_lock;

-		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->lun,
-		    MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK, st->smid,
-		    st->msix_io, 30, 0);
+		r = mpt3sas_scsih_issue_tm(ioc, handle, sdev->channel, sdev->id,
+		    sdev->lun, MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+		    st->smid, st->msix_io, 30, 0);
 		if (r == FAILED || st->cb_idx != 0xFF) {
 			sdev_printk(KERN_WARNING, sdev,
 			    "mpt3sas_scsih_issue_tm: ABORT_TASK: FAILED : "
@@ -9421,11 +9620,13 @@ mpt3sas_scsih_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
 static void
 _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
 {
+	ioc->current_event = fw_event;
 	_scsih_fw_event_del_from_list(ioc, fw_event);

 	/* the queue is being flushed so ignore this event */
 	if (ioc->remove_host || ioc->pci_error_recovery) {
 		fw_event_work_put(fw_event);
+		ioc->current_event = NULL;
 		return;
 	}
@@ -9439,10 +9640,10 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
 		while (scsi_host_in_recovery(ioc->shost) ||
 		    ioc->shost_recovery) {
 			/*
-			 * If we're unloading, bail. Otherwise, this can become
-			 * an infinite loop.
+			 * If we're unloading or cancelling the work, bail.
+			 * Otherwise, this can become an infinite loop.
 			 */
-			if (ioc->remove_host)
+			if (ioc->remove_host || ioc->fw_events_cleanup)
 				goto out;
 			ssleep(1);
 		}
@@ -9503,11 +9704,13 @@ _mpt3sas_fw_work(struct MPT3SAS_ADAPTER *ioc, struct fw_event_work *fw_event)
 		break;
 	case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
 		_scsih_pcie_topology_change_event(ioc, fw_event);
+		ioc->current_event = NULL;
 		return;
 	break;
 	}
 out:
 	fw_event_work_put(fw_event);
+	ioc->current_event = NULL;
 }

 /**
@@ -9888,6 +10091,34 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
 	mutex_unlock(&ioc->scsih_cmds.mutex);
 }

+/**
+ * _scsih_get_shost_and_ioc - get shost and ioc
+ * and verify whether they are NULL or not
+ * @pdev: PCI device struct
+ * @shost: address of scsi host pointer
+ * @ioc: address of HBA adapter pointer
+ *
+ * Return zero if *shost and *ioc are not NULL otherwise return error number.
+ */
+static int
+_scsih_get_shost_and_ioc(struct pci_dev *pdev,
+	struct Scsi_Host **shost, struct MPT3SAS_ADAPTER **ioc)
+{
+	*shost = pci_get_drvdata(pdev);
+	if (*shost == NULL) {
+		dev_err(&pdev->dev, "pdev's driver data is null\n");
+		return -ENXIO;
+	}
+
+	*ioc = shost_priv(*shost);
+	if (*ioc == NULL) {
+		dev_err(&pdev->dev, "shost's private data is null\n");
+		return -ENXIO;
+	}
+
+	return 0;
+}
+
 /**
  * scsih_remove - detach and remove add host
  * @pdev: PCI device struct
@@ -9896,8 +10127,8 @@ _scsih_ir_shutdown(struct MPT3SAS_ADAPTER *ioc)
  */
 static void scsih_remove(struct pci_dev *pdev)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct Scsi_Host *shost;
+	struct MPT3SAS_ADAPTER *ioc;
 	struct _sas_port *mpt3sas_port, *next_port;
 	struct _raid_device *raid_device, *next;
 	struct MPT3SAS_TARGET *sas_target_priv_data;
@@ -9906,6 +10137,9 @@ static void scsih_remove(struct pci_dev *pdev)
 	unsigned long flags;
 	Mpi2ConfigReply_t mpi_reply;

+	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+		return;
+
 	ioc->remove_host = 1;

 	if (!pci_device_is_present(pdev))
@@ -9985,12 +10219,15 @@ static void scsih_remove(struct pci_dev *pdev)
 static void
 scsih_shutdown(struct pci_dev *pdev)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct Scsi_Host *shost;
+	struct MPT3SAS_ADAPTER *ioc;
 	struct workqueue_struct	*wq;
 	unsigned long flags;
 	Mpi2ConfigReply_t mpi_reply;

+	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+		return;
+
 	ioc->remove_host = 1;

 	if (!pci_device_is_present(pdev))
@@ -10560,6 +10797,10 @@ _scsih_determine_hba_mpi_version(struct pci_dev *pdev)
 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3916:
 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
 	case MPI26_MFGPAGE_DEVID_HARD_SEC_3816:
+	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
 		return MPI26_VERSION;
 	}
 	return 0;
@@ -10649,6 +10890,20 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	case MPI26_ATLAS_PCIe_SWITCH_DEVID:
 		ioc->is_gen35_ioc = 1;
 		break;
+	case MPI26_MFGPAGE_DEVID_INVALID0_3816:
+	case MPI26_MFGPAGE_DEVID_INVALID0_3916:
+		dev_err(&pdev->dev,
+		    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Invalid",
+		    pdev->device, pdev->subsystem_vendor,
+		    pdev->subsystem_device);
+		return 1;
+	case MPI26_MFGPAGE_DEVID_INVALID1_3816:
+	case MPI26_MFGPAGE_DEVID_INVALID1_3916:
+		dev_err(&pdev->dev,
+		    "HBA with DeviceId 0x%04x, sub VendorId 0x%04x, sub DeviceId 0x%04x is Tampered",
+		    pdev->device, pdev->subsystem_vendor,
+		    pdev->subsystem_device);
+		return 1;
 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3816:
 	case MPI26_MFGPAGE_DEVID_CFG_SEC_3916:
 		dev_info(&pdev->dev,
@@ -10840,9 +11095,14 @@ _scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static int
 scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct Scsi_Host *shost;
+	struct MPT3SAS_ADAPTER *ioc;
 	pci_power_t device_state;
+	int rc;
+
+	rc = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+	if (rc)
+		return rc;

 	mpt3sas_base_stop_watchdog(ioc);
 	flush_scheduled_work();
@@ -10867,11 +11127,15 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 static int
 scsih_resume(struct pci_dev *pdev)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct Scsi_Host *shost;
+	struct MPT3SAS_ADAPTER *ioc;
 	pci_power_t device_state = pdev->current_state;
 	int r;

+	r = _scsih_get_shost_and_ioc(pdev, &shost, &ioc);
+	if (r)
+		return r;
+
 	ioc_info(ioc, "pdev=0x%p, slot=%s, previous operating state [D%d]\n",
 		 pdev, pci_name(pdev), device_state);
@@ -10902,8 +11166,11 @@ scsih_resume(struct pci_dev *pdev)
 static pci_ers_result_t
 scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct Scsi_Host *shost;
+	struct MPT3SAS_ADAPTER *ioc;
+
+	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+		return PCI_ERS_RESULT_DISCONNECT;

 	ioc_info(ioc, "PCI error: detected callback, state(%d)!!\n", state);
@@ -10938,10 +11205,13 @@ scsih_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 static pci_ers_result_t
 scsih_pci_slot_reset(struct pci_dev *pdev)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct Scsi_Host *shost;
+	struct MPT3SAS_ADAPTER *ioc;
 	int rc;

+	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+		return PCI_ERS_RESULT_DISCONNECT;
+
 	ioc_info(ioc, "PCI error: slot reset callback!!\n");

 	ioc->pci_error_recovery = 0;
@@ -10974,8 +11244,11 @@ scsih_pci_slot_reset(struct pci_dev *pdev)
 static void
 scsih_pci_resume(struct pci_dev *pdev)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct Scsi_Host *shost;
+	struct MPT3SAS_ADAPTER *ioc;
+
+	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+		return;

 	ioc_info(ioc, "PCI error: resume callback!!\n");
@@ -10990,8 +11263,11 @@ scsih_pci_resume(struct pci_dev *pdev)
 static pci_ers_result_t
 scsih_pci_mmio_enabled(struct pci_dev *pdev)
 {
-	struct Scsi_Host *shost = pci_get_drvdata(pdev);
-	struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
+	struct Scsi_Host *shost;
+	struct MPT3SAS_ADAPTER *ioc;
+
+	if (_scsih_get_shost_and_ioc(pdev, &shost, &ioc))
+		return PCI_ERS_RESULT_DISCONNECT;

 	ioc_info(ioc, "PCI error: mmio enabled callback!!\n");
@@ -11139,6 +11415,14 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3916,
 		PCI_ANY_ID, PCI_ANY_ID },

+	/*
+	 *  Aero SI -> 0x00E0 Invalid, 0x00E3 Tampered
+	 */
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3916,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3916,
+		PCI_ANY_ID, PCI_ANY_ID },
+
 	/* Atlas PCIe Switch Management Port */
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_ATLAS_PCIe_SWITCH_DEVID,
 		PCI_ANY_ID, PCI_ANY_ID },
@@ -11151,6 +11435,14 @@ static const struct pci_device_id mpt3sas_pci_table[] = {
 	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_HARD_SEC_3816,
 		PCI_ANY_ID, PCI_ANY_ID },

+	/*
+	 *  Sea SI -> 0x00E4 Invalid, 0x00E7 Tampered
+	 */
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID0_3816,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_INVALID1_3816,
+		PCI_ANY_ID, PCI_ANY_ID },
+
 	{0}	/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, mpt3sas_pci_table);
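Taken together, the scsih hunks change the error-handler strategy: instead of trusting the TM reply alone, a target/LUN reset now re-checks whether any request for that channel:id(:lun) nexus is still outstanding and, before declaring failure, polls the reply queues once in case the aborted command's completion interrupt was merely late. Reduced to a sketch, using the helpers added above:

	rc = scsih_tm_cmd_map_status(ioc, channel, id, lun, type, smid_task);
	if (rc == FAILED) {
		/* the aborted IO's reply may still be sitting in a queue */
		mpt3sas_base_mask_interrupts(ioc);
		mpt3sas_base_sync_reply_irqs(ioc, 1);
		mpt3sas_base_unmask_interrupts(ioc);
		rc = scsih_tm_cmd_map_status(ioc, channel, id, lun,
					     type, smid_task);
	}

The same series also tracks ioc->current_event and fw_events_cleanup so an in-flight firmware-event worker can be cancelled without _scsih_fw_event_cleanup_queue() spinning forever, and replaces bare pci_get_drvdata()/shost_priv() dereferences in the PCI callbacks with the NULL-checking _scsih_get_shost_and_ioc() helper.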
@@ -246,19 +246,16 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
 			     &mvi->tx_dma, GFP_KERNEL);
 	if (!mvi->tx)
 		goto err_out;
-	memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
 	mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
 					 &mvi->rx_fis_dma, GFP_KERNEL);
 	if (!mvi->rx_fis)
 		goto err_out;
-	memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);

 	mvi->rx = dma_alloc_coherent(mvi->dev,
 				     sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
 				     &mvi->rx_dma, GFP_KERNEL);
 	if (!mvi->rx)
 		goto err_out;
-	memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
 	mvi->rx[0] = cpu_to_le32(0xfff);
 	mvi->rx_cons = 0xfff;
@@ -267,7 +264,6 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
 				       &mvi->slot_dma, GFP_KERNEL);
 	if (!mvi->slot)
 		goto err_out;
-	memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);

 	mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
 				       TRASH_BUCKET_SIZE,
|
|||
if (IS_ERR(mhba->dm_thread)) {
|
||||
dev_err(&mhba->pdev->dev,
|
||||
"failed to create device scan thread\n");
|
||||
ret = PTR_ERR(mhba->dm_thread);
|
||||
mutex_unlock(&mhba->sas_discovery_mutex);
|
||||
goto fail_create_thread;
|
||||
}
|
||||
|
|
|
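The mvumi fix is the usual missing error capture: the failure branch unlocked and jumped to cleanup while ret still held a success value. Schematically:

	mhba->dm_thread = kthread_create(mvumi_rescan_bus, mhba, "mvumi_scanthread");
	if (IS_ERR(mhba->dm_thread)) {
		ret = PTR_ERR(mhba->dm_thread);	/* propagate the real error */
		mutex_unlock(&mhba->sas_discovery_mutex);
		goto fail_create_thread;
	}

(The kthread_create() arguments shown are assumptions; only the PTR_ERR() assignment is from the patch.)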
@@ -2226,7 +2226,7 @@ static struct device_attribute *myrb_shost_attrs[] = {
 	NULL,
 };

-struct scsi_host_template myrb_template = {
+static struct scsi_host_template myrb_template = {
 	.module			= THIS_MODULE,
 	.name			= "DAC960",
 	.proc_name		= "myrb",
@@ -2315,7 +2315,7 @@ static void myrb_get_state(struct device *dev)
 	raid_set_state(myrb_raid_template, dev, state);
 }

-struct raid_function_template myrb_raid_functions = {
+static struct raid_function_template myrb_raid_functions = {
 	.cookie		= &myrb_template,
 	.is_raid	= myrb_is_raid,
 	.get_resync	= myrb_get_resync,
@@ -2489,7 +2489,7 @@ static void myrb_monitor(struct work_struct *work)
 *
 * Return: true for fatal errors and false otherwise.
 */
-bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
+static bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
 		unsigned char parm0, unsigned char parm1)
 {
 	struct pci_dev *pdev = cb->pdev;
@@ -1529,7 +1529,7 @@ static struct device_attribute *myrs_shost_attrs[] = {
 /*
 * SCSI midlayer interface
 */
-int myrs_host_reset(struct scsi_cmnd *scmd)
+static int myrs_host_reset(struct scsi_cmnd *scmd)
 {
 	struct Scsi_Host *shost = scmd->device->host;
 	struct myrs_hba *cs = shost_priv(shost);
@@ -1919,7 +1919,7 @@ static void myrs_slave_destroy(struct scsi_device *sdev)
 	kfree(sdev->hostdata);
 }

-struct scsi_host_template myrs_template = {
+static struct scsi_host_template myrs_template = {
 	.module			= THIS_MODULE,
 	.name			= "DAC960",
 	.proc_name		= "myrs",
@@ -2033,7 +2033,7 @@ myrs_get_state(struct device *dev)
 	raid_set_state(myrs_raid_template, dev, state);
 }

-struct raid_function_template myrs_raid_functions = {
+static struct raid_function_template myrs_raid_functions = {
 	.cookie		= &myrs_template,
 	.is_raid	= myrs_is_raid,
 	.get_resync	= myrs_get_resync,
@@ -2043,7 +2043,7 @@ struct raid_function_template myrs_raid_functions = {
 /*
 * PCI interface functions
 */
-void myrs_flush_cache(struct myrs_hba *cs)
+static void myrs_flush_cache(struct myrs_hba *cs)
 {
 	myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
 }
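The myrb/myrs hunks only add static to host templates, raid_function_templates, and helpers referenced within their own file; behaviour is unchanged, but the symbols no longer leak into the kernel's global namespace and sparse stops warning "symbol ... was not declared. Should it be static?". E.g.:

	static struct scsi_host_template myrs_template = {
		.module		= THIS_MODULE,
		.name		= "DAC960",
		.proc_name	= "myrs",
	};

(fields abbreviated; the real initializer sets many more callbacks).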
@@ -1247,7 +1247,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
 			 * ---> AutoSCSI with MSGOUTreg is processed.
 			 */
 			data->msgout_len = 0;
-		};
+		}

 		nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
 	}
@@ -1839,7 +1839,7 @@ static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)

 		nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
 			  nsp32_read1(base, SCSI_BUS_MONITOR));
-	};
+	}

 	data->msgout_len = 0;
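The nsp32 hunks delete semicolons that followed closing braces. A "};" after a plain compound statement is the block plus an empty statement; it compiles, but confuses readers and static checkers:

	while (busy) {
		cpu_relax();
	};	/* stray ';' is an empty statement; the patch drops it */

(The loop shown is hypothetical; the fix is only the removed character.)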
@@ -2860,10 +2860,8 @@ static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
 {
 	struct pmcraid_cmd *cancel_cmd;
 	struct pmcraid_instance *pinstance;
-	struct pmcraid_resource_entry *res;

 	pinstance = (struct pmcraid_instance *)cmd->drv_inst;
-	res = cmd->scsi_cmd->device->hostdata;

 	cancel_cmd = pmcraid_get_free_cmd(pinstance);
@@ -4716,7 +4714,6 @@ static int pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
 			return -ENOMEM;
 		}

-		memset(pinstance->hrrq_start[i], 0, buffer_size);
 		pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
 		pinstance->hrrq_end[i] =
 			pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
@@ -389,6 +389,7 @@ struct qedf_ctx {
 	mempool_t *io_mempool;
 	struct workqueue_struct *dpc_wq;
 	struct delayed_work recovery_work;
+	struct delayed_work board_disable_work;
 	struct delayed_work grcdump_work;
 	struct delayed_work stag_work;
@@ -541,9 +542,17 @@ extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
 extern void qedf_wq_grcdump(struct work_struct *work);
 void qedf_stag_change_work(struct work_struct *work);
 void qedf_ctx_soft_reset(struct fc_lport *lport);
+extern void qedf_board_disable_work(struct work_struct *work);
+extern void qedf_schedule_hw_err_handler(void *dev,
+		enum qed_hw_err_type err_type);

 #define FCOE_WORD_TO_BYTE	4
 #define QEDF_MAX_TASK_NUM	0xFFFF
+#define QL45xxx			0x165C
+#define QL41xxx			0x8080
+#define MAX_CT_PAYLOAD		2048
+#define DISCOVERED_PORTS	4
+#define NUMBER_OF_PORTS		1

 struct fip_vlan {
 	struct ethhdr eth;
@@ -124,7 +124,7 @@ static int qedf_initiate_els(struct qedf_rport *fcport, unsigned int op,
 	task = qedf_get_task_mem(&qedf->tasks, xid);
 	qedf_init_mp_task(els_req, task, sqe);

-	/* Put timer on original I/O request */
+	/* Put timer on els request */
 	if (timer_msec)
 		qedf_cmd_timer_set(qedf, els_req, timer_msec);
@@ -143,10 +143,33 @@ void qedf_process_els_compl(struct qedf_ctx *qedf, struct fcoe_cqe *cqe,
 	struct qedf_ioreq *els_req)
 {
 	struct fcoe_cqe_midpath_info *mp_info;
+	struct qedf_rport *fcport;

 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "Entered with xid = 0x%x"
 		   " cmd_type = %d.\n", els_req->xid, els_req->cmd_type);

+	if ((els_req->event == QEDF_IOREQ_EV_ELS_FLUSH)
+		|| (els_req->event == QEDF_IOREQ_EV_CLEANUP_SUCCESS)
+		|| (els_req->event == QEDF_IOREQ_EV_CLEANUP_FAILED)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			"ELS completion xid=0x%x after flush event=0x%x",
+			els_req->xid, els_req->event);
+		return;
+	}
+
+	fcport = els_req->fcport;
+
+	/* When flush is active,
+	 * let the cmds be completed from the cleanup context
+	 */
+	if (test_bit(QEDF_RPORT_IN_TARGET_RESET, &fcport->flags) ||
+	    test_bit(QEDF_RPORT_IN_LUN_RESET, &fcport->flags)) {
+		QEDF_INFO(&qedf->dbg_ctx, QEDF_LOG_IO,
+			"Dropping ELS completion xid=0x%x as fcport is flushing",
+			els_req->xid);
+		return;
+	}
+
 	clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

 	/* Kill the ELS timer */
@@ -185,10 +208,6 @@ static void qedf_rrq_compl(struct qedf_els_cb_arg *cb_arg)
 		goto out_free;
 	}

-	if (rrq_req->event != QEDF_IOREQ_EV_ELS_TMO &&
-	    rrq_req->event != QEDF_IOREQ_EV_ELS_ERR_DETECT)
-		cancel_delayed_work_sync(&orig_io_req->timeout_work);
-
 	refcount = kref_read(&orig_io_req->refcount);
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS, "rrq_compl: orig io = %p,"
 		   " orig xid = 0x%x, rrq_xid = 0x%x, refcount=%d\n",
@@ -883,6 +902,11 @@ static void qedf_rec_compl(struct qedf_els_cb_arg *cb_arg)
 	opcode = fc_frame_payload_op(fp);
 	if (opcode == ELS_LS_RJT) {
 		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
+		if (!rjt) {
+			QEDF_ERR(&qedf->dbg_ctx, "payload get failed");
+			goto out_free_frame;
+		}
+
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_ELS,
 		    "Received LS_RJT for REC: er_reason=0x%x, "
 		    "er_explan=0x%x.\n", rjt->er_reason, rjt->er_explan);
@@ -85,13 +85,13 @@ static void qedf_cmd_timeout(struct work_struct *work)
         */
        QEDF_ERR(&(qedf->dbg_ctx), "ELS timeout, xid=0x%x.\n",
            io_req->xid);
        qedf_initiate_cleanup(io_req, true);
        io_req->event = QEDF_IOREQ_EV_ELS_TMO;
        /* Call callback function to complete command */
        if (io_req->cb_func && io_req->cb_arg) {
            io_req->cb_func(io_req->cb_arg);
            io_req->cb_arg = NULL;
        }
        qedf_initiate_cleanup(io_req, true);
        kref_put(&io_req->refcount, qedf_release_cmd);
        break;
    case QEDF_SEQ_CLEANUP:

@@ -1562,6 +1562,8 @@ static void qedf_flush_els_req(struct qedf_ctx *qedf,
     */
    els_req->event = QEDF_IOREQ_EV_ELS_FLUSH;

    clear_bit(QEDF_CMD_OUTSTANDING, &els_req->flags);

    /* Cancel the timer */
    cancel_delayed_work_sync(&els_req->timeout_work);

@@ -1704,8 +1706,10 @@ void qedf_flush_active_ios(struct qedf_rport *fcport, int lun)
                io_req, io_req->xid);
            continue;
        }
        qedf_initiate_cleanup(io_req, false);
        flush_cnt++;
        qedf_flush_els_req(qedf, io_req);

        /*
         * Release the kref and go back to the top of the
         * loop.

@@ -2159,7 +2163,6 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
    /* Sanity check qedf_rport before dereferencing any pointers */
    if (!test_bit(QEDF_RPORT_SESSION_READY, &fcport->flags)) {
        QEDF_ERR(NULL, "tgt not offloaded\n");
        rc = 1;
        return SUCCESS;
    }

@@ -2169,6 +2172,10 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
        return SUCCESS;
    }

    if (io_req->cmd_type == QEDF_ELS) {
        goto process_els;
    }

    if (!test_bit(QEDF_CMD_OUTSTANDING, &io_req->flags) ||
        test_and_set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags)) {
        QEDF_ERR(&(qedf->dbg_ctx), "io_req xid=0x%x already in "

@@ -2178,6 +2185,7 @@ int qedf_initiate_cleanup(struct qedf_ioreq *io_req,
    }
    set_bit(QEDF_CMD_IN_CLEANUP, &io_req->flags);

process_els:
    /* Ensure room on SQ */
    if (!atomic_read(&fcport->free_sqes)) {
        QEDF_ERR(&(qedf->dbg_ctx), "No SQ entries available\n");

@@ -41,7 +41,7 @@ MODULE_PARM_DESC(dev_loss_tmo, " dev_loss_tmo setting for attached "
    "remote ports (default 60)");

uint qedf_debug = QEDF_LOG_INFO;
module_param_named(debug, qedf_debug, uint, S_IRUGO);
module_param_named(debug, qedf_debug, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(debug, " Debug mask. Pass '1' to enable default debugging"
    " mask");

@@ -105,6 +105,12 @@ module_param_named(dp_level, qedf_dp_level, uint, S_IRUGO);
MODULE_PARM_DESC(dp_level, " printk verbosity control passed to qed module "
    "during probe (0-3: 0 more verbose).");

static bool qedf_enable_recovery = true;
module_param_named(enable_recovery, qedf_enable_recovery,
        bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(enable_recovery, "Enable/disable recovery on driver/firmware "
    "interface level errors 0 = Disabled, 1 = Enabled (Default: 1).");

struct workqueue_struct *qedf_io_wq;

static struct fcoe_percpu_s qedf_global;

@@ -690,6 +696,7 @@ static struct qed_fcoe_cb_ops qedf_cb_ops = {
        .dcbx_aen = qedf_dcbx_handler,
        .get_generic_tlv_data = qedf_get_generic_tlv_data,
        .get_protocol_tlv_data = qedf_get_protocol_tlv_data,
        .schedule_hw_err_handler = qedf_schedule_hw_err_handler,
    }
};

@@ -726,7 +733,7 @@ static int qedf_eh_abort(struct scsi_cmnd *sc_cmd)
    rdata = fcport->rdata;
    if (!rdata || !kref_get_unless_zero(&rdata->kref)) {
        QEDF_ERR(&qedf->dbg_ctx, "stale rport, sc_cmd=%p\n", sc_cmd);
        rc = 1;
        rc = SUCCESS;
        goto out;
    }

@@ -1333,7 +1340,7 @@ static int qedf_offload_connection(struct qedf_ctx *qedf,
    ether_addr_copy(conn_info.dst_mac, qedf->ctlr.dest_addr);

    conn_info.tx_max_fc_pay_len = fcport->rdata->maxframe_size;
    conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov / 20;
    conn_info.e_d_tov_timer_val = qedf->lport->e_d_tov;
    conn_info.rec_tov_timer_val = 3; /* I think this is what E3 was */
    conn_info.rx_max_fc_pay_len = fcport->rdata->maxframe_size;

@@ -1558,6 +1565,17 @@ static void qedf_rport_event_handler(struct fc_lport *lport,
        if (port_id == FC_FID_DIR_SERV)
            break;

        if (rdata->spp_type != FC_TYPE_FCP) {
            QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                "No action since spp type isn't FCP\n");
            break;
        }
        if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) {
            QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                "Not FCP target so no action\n");
            break;
        }

        if (!rport) {
            QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC,
                "port_id=%x - rport notcreated Yet!!\n", port_id);

@@ -1634,11 +1652,13 @@ static void qedf_fcoe_ctlr_setup(struct qedf_ctx *qedf)
static void qedf_setup_fdmi(struct qedf_ctx *qedf)
{
    struct fc_lport *lport = qedf->lport;
    struct fc_host_attrs *fc_host = shost_to_fc_host(lport->host);
    u64 dsn;
    u8 buf[8];
    int pos;
    uint32_t i;

    /*
     * fdmi_enabled needs to be set for libfc to execute FDMI registration.
     * fdmi_enabled needs to be set for libfc
     * to execute FDMI registration
     */
    lport->fdmi_enabled = 1;

@@ -1648,32 +1668,53 @@ static void qedf_setup_fdmi(struct qedf_ctx *qedf)
     */

    /* Get the PCI-e Device Serial Number Capability */
    dsn = pci_get_dsn(qedf->pdev);
    if (dsn)
        snprintf(fc_host->serial_number,
            sizeof(fc_host->serial_number), "%016llX", dsn);
    else
        snprintf(fc_host->serial_number,
            sizeof(fc_host->serial_number), "Unknown");
    pos = pci_find_ext_capability(qedf->pdev, PCI_EXT_CAP_ID_DSN);
    if (pos) {
        pos += 4;
        for (i = 0; i < 8; i++)
            pci_read_config_byte(qedf->pdev, pos + i, &buf[i]);

        snprintf(fc_host->manufacturer,
            sizeof(fc_host->manufacturer), "%s", "Cavium Inc.");
        snprintf(fc_host_serial_number(lport->host),
            FC_SERIAL_NUMBER_SIZE,
            "%02X%02X%02X%02X%02X%02X%02X%02X",
            buf[7], buf[6], buf[5], buf[4],
            buf[3], buf[2], buf[1], buf[0]);
    } else
        snprintf(fc_host_serial_number(lport->host),
            FC_SERIAL_NUMBER_SIZE, "Unknown");

    snprintf(fc_host->model, sizeof(fc_host->model), "%s", "QL41000");
    snprintf(fc_host_manufacturer(lport->host),
        FC_SERIAL_NUMBER_SIZE, "%s", "Marvell Semiconductor Inc.");

    snprintf(fc_host->model_description, sizeof(fc_host->model_description),
        "%s", "QLogic FastLinQ QL41000 Series 10/25/40/50GGbE Controller"
        "(FCoE)");
    if (qedf->pdev->device == QL45xxx) {
        snprintf(fc_host_model(lport->host),
            FC_SYMBOLIC_NAME_SIZE, "%s", "QL45xxx");

        snprintf(fc_host->hardware_version, sizeof(fc_host->hardware_version),
            "Rev %d", qedf->pdev->revision);
        snprintf(fc_host_model_description(lport->host),
            FC_SYMBOLIC_NAME_SIZE, "%s",
            "Marvell FastLinQ QL45xxx FCoE Adapter");
    }

    snprintf(fc_host->driver_version, sizeof(fc_host->driver_version),
        "%s", QEDF_VERSION);
    if (qedf->pdev->device == QL41xxx) {
        snprintf(fc_host_model(lport->host),
            FC_SYMBOLIC_NAME_SIZE, "%s", "QL41xxx");

        snprintf(fc_host_model_description(lport->host),
            FC_SYMBOLIC_NAME_SIZE, "%s",
            "Marvell FastLinQ QL41xxx FCoE Adapter");
    }

    snprintf(fc_host_hardware_version(lport->host),
        FC_VERSION_STRING_SIZE, "Rev %d", qedf->pdev->revision);

    snprintf(fc_host_driver_version(lport->host),
        FC_VERSION_STRING_SIZE, "%s", QEDF_VERSION);

    snprintf(fc_host_firmware_version(lport->host),
        FC_VERSION_STRING_SIZE, "%d.%d.%d.%d",
        FW_MAJOR_VERSION, FW_MINOR_VERSION, FW_REVISION_VERSION,
        FW_ENGINEERING_VERSION);

    snprintf(fc_host->firmware_version, sizeof(fc_host->firmware_version),
        "%d.%d.%d.%d", FW_MAJOR_VERSION, FW_MINOR_VERSION,
        FW_REVISION_VERSION, FW_ENGINEERING_VERSION);
}

static int qedf_lport_setup(struct qedf_ctx *qedf)

@@ -1720,8 +1761,13 @@ static int qedf_lport_setup(struct qedf_ctx *qedf)
    fc_host_dev_loss_tmo(lport->host) = qedf_dev_loss_tmo;

    /* Set symbolic node name */
    snprintf(fc_host_symbolic_name(lport->host), 256,
        "QLogic %s v%s", QEDF_MODULE_NAME, QEDF_VERSION);
    if (qedf->pdev->device == QL45xxx)
        snprintf(fc_host_symbolic_name(lport->host), 256,
            "Marvell FastLinQ 45xxx FCoE v%s", QEDF_VERSION);

    if (qedf->pdev->device == QL41xxx)
        snprintf(fc_host_symbolic_name(lport->host), 256,
            "Marvell FastLinQ 41xxx FCoE v%s", QEDF_VERSION);

    qedf_setup_fdmi(qedf);

@@ -3221,11 +3267,16 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
    void *task_start, *task_end;
    struct qed_slowpath_params slowpath_params;
    struct qed_probe_params qed_params;
    u16 retry_cnt = 10;

    /*
     * When doing error recovery we didn't reap the lport so don't try
     * to reallocate it.
     */
retry_probe:
    if (mode == QEDF_MODE_RECOVERY)
        msleep(2000);

    if (mode != QEDF_MODE_RECOVERY) {
        lport = libfc_host_alloc(&qedf_host_template,
            sizeof(struct qedf_ctx));

@@ -3312,6 +3363,12 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
    qed_params.is_vf = is_vf;
    qedf->cdev = qed_ops->common->probe(pdev, &qed_params);
    if (!qedf->cdev) {
        if ((mode == QEDF_MODE_RECOVERY) && retry_cnt) {
            QEDF_ERR(&qedf->dbg_ctx,
                "Retry %d initialize hardware\n", retry_cnt);
            retry_cnt--;
            goto retry_probe;
        }
        QEDF_ERR(&qedf->dbg_ctx, "common probe failed.\n");
        rc = -ENODEV;
        goto err1;

@@ -3760,6 +3817,44 @@ void qedf_wq_grcdump(struct work_struct *work)
    qedf_capture_grc_dump(qedf);
}

void qedf_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type)
{
    struct qedf_ctx *qedf = dev;

    QEDF_ERR(&(qedf->dbg_ctx),
        "Hardware error handler scheduled, event=%d.\n",
        err_type);

    if (test_bit(QEDF_IN_RECOVERY, &qedf->flags)) {
        QEDF_ERR(&(qedf->dbg_ctx),
            "Already in recovery, not scheduling board disable work.\n");
        return;
    }

    switch (err_type) {
    case QED_HW_ERR_FAN_FAIL:
        schedule_delayed_work(&qedf->board_disable_work, 0);
        break;
    case QED_HW_ERR_MFW_RESP_FAIL:
    case QED_HW_ERR_HW_ATTN:
    case QED_HW_ERR_DMAE_FAIL:
    case QED_HW_ERR_FW_ASSERT:
        /* Prevent HW attentions from being reasserted */
        qed_ops->common->attn_clr_enable(qedf->cdev, true);
        break;
    case QED_HW_ERR_RAMROD_FAIL:
        /* Prevent HW attentions from being reasserted */
        qed_ops->common->attn_clr_enable(qedf->cdev, true);

        if (qedf_enable_recovery)
            qed_ops->common->recovery_process(qedf->cdev);

        break;
    default:
        break;
    }
}

/*
 * Protocol TLV handler
 */

@@ -274,6 +274,10 @@ struct qedi_ctx {
    spinlock_t ll2_lock;    /* Light L2 lock */
    spinlock_t hba_lock;    /* per port lock */
    struct task_struct *ll2_recv_thread;
    unsigned long qedi_err_flags;
#define QEDI_ERR_ATTN_CLR_EN        0
#define QEDI_ERR_IS_RECOVERABLE     2
#define QEDI_ERR_OVERRIDE_EN        31
    unsigned long flags;
#define UIO_DEV_OPENED              1
#define QEDI_IOTHREAD_WAKE          2

@@ -305,6 +309,7 @@ struct qedi_ctx {
    u32 max_sqes;
    u8 num_queues;
    u32 max_active_conns;
    s32 msix_count;

    struct iscsi_cid_queue cid_que;
    struct qedi_endpoint **ep_tbl;

@@ -334,6 +339,7 @@ struct qedi_ctx {

    struct workqueue_struct *dpc_wq;
    struct delayed_work recovery_work;
    struct delayed_work board_disable_work;

    spinlock_t task_idx_lock;   /* To protect gbl context */
    s32 last_tidx_alloc;

@@ -59,6 +59,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
        "Freeing tid=0x%x for cid=0x%x\n",
        cmd->task_id, qedi_conn->iscsi_conn_id);

    spin_lock(&qedi_conn->list_lock);
    if (likely(cmd->io_cmd_in_list)) {
        cmd->io_cmd_in_list = false;
        list_del_init(&cmd->io_cmd);

@@ -69,6 +70,7 @@ static void qedi_process_logout_resp(struct qedi_ctx *qedi,
            cmd->task_id, qedi_conn->iscsi_conn_id,
            &cmd->io_cmd);
    }
    spin_unlock(&qedi_conn->list_lock);

    cmd->state = RESPONSE_RECEIVED;
    qedi_clear_task_idx(qedi, cmd->task_id);

@@ -122,6 +124,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
        "Freeing tid=0x%x for cid=0x%x\n",
        cmd->task_id, qedi_conn->iscsi_conn_id);

    spin_lock(&qedi_conn->list_lock);
    if (likely(cmd->io_cmd_in_list)) {
        cmd->io_cmd_in_list = false;
        list_del_init(&cmd->io_cmd);

@@ -132,6 +135,7 @@ static void qedi_process_text_resp(struct qedi_ctx *qedi,
            cmd->task_id, qedi_conn->iscsi_conn_id,
            &cmd->io_cmd);
    }
    spin_unlock(&qedi_conn->list_lock);

    cmd->state = RESPONSE_RECEIVED;
    qedi_clear_task_idx(qedi, cmd->task_id);

@@ -222,11 +226,13 @@ static void qedi_process_tmf_resp(struct qedi_ctx *qedi,

    tmf_hdr = (struct iscsi_tm *)qedi_cmd->task->hdr;

    spin_lock(&qedi_conn->list_lock);
    if (likely(qedi_cmd->io_cmd_in_list)) {
        qedi_cmd->io_cmd_in_list = false;
        list_del_init(&qedi_cmd->io_cmd);
        qedi_conn->active_cmd_count--;
    }
    spin_unlock(&qedi_conn->list_lock);

    if (((tmf_hdr->flags & ISCSI_FLAG_TM_FUNC_MASK) ==
        ISCSI_TM_FUNC_LOGICAL_UNIT_RESET) ||

@@ -288,11 +294,13 @@ static void qedi_process_login_resp(struct qedi_ctx *qedi,
        ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK;
    qedi_conn->gen_pdu.resp_wr_ptr = qedi_conn->gen_pdu.resp_buf + pld_len;

    spin_lock(&qedi_conn->list_lock);
    if (likely(cmd->io_cmd_in_list)) {
        cmd->io_cmd_in_list = false;
        list_del_init(&cmd->io_cmd);
        qedi_conn->active_cmd_count--;
    }
    spin_unlock(&qedi_conn->list_lock);

    memset(task_ctx, '\0', sizeof(*task_ctx));

@@ -816,8 +824,11 @@ static void qedi_process_cmd_cleanup_resp(struct qedi_ctx *qedi,
        qedi_clear_task_idx(qedi_conn->qedi, rtid);

        spin_lock(&qedi_conn->list_lock);
        list_del_init(&dbg_cmd->io_cmd);
        qedi_conn->active_cmd_count--;
        if (likely(dbg_cmd->io_cmd_in_list)) {
            dbg_cmd->io_cmd_in_list = false;
            list_del_init(&dbg_cmd->io_cmd);
            qedi_conn->active_cmd_count--;
        }
        spin_unlock(&qedi_conn->list_lock);
        qedi_cmd->state = CLEANUP_RECV;
        wake_up_interruptible(&qedi_conn->wait_queue);

@@ -1235,6 +1246,7 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
        qedi_conn->cmd_cleanup_req++;
        qedi_iscsi_cleanup_task(ctask, true);

        cmd->io_cmd_in_list = false;
        list_del_init(&cmd->io_cmd);
        qedi_conn->active_cmd_count--;
        QEDI_WARN(&qedi->dbg_ctx,

@@ -1255,7 +1267,8 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
    rval = wait_event_interruptible_timeout(qedi_conn->wait_queue,
                ((qedi_conn->cmd_cleanup_req ==
                qedi_conn->cmd_cleanup_cmpl) ||
                qedi_conn->ep),
                test_bit(QEDI_IN_RECOVERY,
                    &qedi->flags)),
                5 * HZ);
    if (rval) {
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_SCSI_TM,

@@ -1280,7 +1293,9 @@ int qedi_cleanup_all_io(struct qedi_ctx *qedi, struct qedi_conn *qedi_conn,
    /* Enable IOs for all other sessions except current.*/
    if (!wait_event_interruptible_timeout(qedi_conn->wait_queue,
                (qedi_conn->cmd_cleanup_req ==
                qedi_conn->cmd_cleanup_cmpl),
                qedi_conn->cmd_cleanup_cmpl) ||
                test_bit(QEDI_IN_RECOVERY,
                    &qedi->flags),
                5 * HZ)) {
        iscsi_host_for_each_session(qedi->shost,
                qedi_mark_device_available);

@@ -1446,8 +1461,11 @@ static void qedi_tmf_work(struct work_struct *work)
    spin_unlock_bh(&qedi_conn->tmf_work_lock);

    spin_lock(&qedi_conn->list_lock);
    list_del_init(&cmd->io_cmd);
    qedi_conn->active_cmd_count--;
    if (likely(cmd->io_cmd_in_list)) {
        cmd->io_cmd_in_list = false;
        list_del_init(&cmd->io_cmd);
        qedi_conn->active_cmd_count--;
    }
    spin_unlock(&qedi_conn->list_lock);

    clear_bit(QEDI_CONN_FW_CLEANUP, &qedi_conn->flags);

@@ -975,11 +975,13 @@ static void qedi_cleanup_active_cmd_list(struct qedi_conn *qedi_conn)
{
    struct qedi_cmd *cmd, *cmd_tmp;

    spin_lock(&qedi_conn->list_lock);
    list_for_each_entry_safe(cmd, cmd_tmp, &qedi_conn->active_cmd_list,
                 io_cmd) {
        list_del_init(&cmd->io_cmd);
        qedi_conn->active_cmd_count--;
    }
    spin_unlock(&qedi_conn->list_lock);
}

static void qedi_ep_disconnect(struct iscsi_endpoint *ep)

@@ -1069,6 +1071,11 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
        wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;

    qedi_ep->state = EP_STATE_DISCONN_START;

    if (test_bit(QEDI_IN_SHUTDOWN, &qedi->flags) ||
        test_bit(QEDI_IN_RECOVERY, &qedi->flags))
        goto ep_release_conn;

    ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
    if (ret) {
        QEDI_WARN(&qedi->dbg_ctx,

@@ -50,6 +50,10 @@ module_param(qedi_ll2_buf_size, uint, 0644);
MODULE_PARM_DESC(qedi_ll2_buf_size,
        "parameter to set ping packet size, default - 0x400, Jumbo packets - 0x2400.");

static uint qedi_flags_override;
module_param(qedi_flags_override, uint, 0644);
MODULE_PARM_DESC(qedi_flags_override, "Disable/Enable MFW error flags bits action.");

const struct qed_iscsi_ops *qedi_ops;
static struct scsi_transport_template *qedi_scsi_transport;
static struct pci_driver qedi_pci_driver;

@@ -63,6 +67,8 @@ static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
static void qedi_recovery_handler(struct work_struct *work);
static void qedi_schedule_hw_err_handler(void *dev,
        enum qed_hw_err_type err_type);

static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{

@@ -789,8 +795,7 @@ static void qedi_ll2_free_skbs(struct qedi_ctx *qedi)
    spin_lock_bh(&qedi->ll2_lock);
    list_for_each_entry_safe(work, work_tmp, &qedi->ll2_skb_list, list) {
        list_del(&work->list);
        if (work->skb)
            kfree_skb(work->skb);
        kfree_skb(work->skb);
        kfree(work);
    }
    spin_unlock_bh(&qedi->ll2_lock);

@@ -1113,6 +1118,42 @@ static void qedi_get_protocol_tlv_data(void *dev, void *data)
    return;
}

void qedi_schedule_hw_err_handler(void *dev,
        enum qed_hw_err_type err_type)
{
    struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
    unsigned long override_flags = qedi_flags_override;

    if (override_flags && test_bit(QEDI_ERR_OVERRIDE_EN, &override_flags))
        qedi->qedi_err_flags = qedi_flags_override;

    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
        "HW error handler scheduled, err=%d err_flags=0x%x\n",
        err_type, qedi->qedi_err_flags);

    switch (err_type) {
    case QED_HW_ERR_FAN_FAIL:
        schedule_delayed_work(&qedi->board_disable_work, 0);
        break;
    case QED_HW_ERR_MFW_RESP_FAIL:
    case QED_HW_ERR_HW_ATTN:
    case QED_HW_ERR_DMAE_FAIL:
    case QED_HW_ERR_RAMROD_FAIL:
    case QED_HW_ERR_FW_ASSERT:
        /* Prevent HW attentions from being reasserted */
        if (test_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags))
            qedi_ops->common->attn_clr_enable(qedi->cdev, true);

        if (err_type == QED_HW_ERR_RAMROD_FAIL &&
            test_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags))
            qedi_ops->common->recovery_process(qedi->cdev);

        break;
    default:
        break;
    }
}

static void qedi_schedule_recovery_handler(void *dev)
{
    struct qedi_ctx *qedi = dev;

@@ -1127,6 +1168,15 @@ static void qedi_schedule_recovery_handler(void *dev)
    schedule_delayed_work(&qedi->recovery_work, 0);
}

static void qedi_set_conn_recovery(struct iscsi_cls_session *cls_session)
{
    struct iscsi_session *session = cls_session->dd_data;
    struct iscsi_conn *conn = session->leadconn;
    struct qedi_conn *qedi_conn = conn->dd_data;

    qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}

static void qedi_link_update(void *dev, struct qed_link_output *link)
{
    struct qedi_ctx *qedi = (struct qedi_ctx *)dev;

@@ -1138,6 +1188,7 @@ static void qedi_link_update(void *dev, struct qed_link_output *link)
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
            "Link Down event.\n");
        atomic_set(&qedi->link_state, QEDI_LINK_DOWN);
        iscsi_host_for_each_session(qedi->shost, qedi_set_conn_recovery);
    }
}

@@ -1145,6 +1196,7 @@ static struct qed_iscsi_cb_ops qedi_cb_ops = {
    {
        .link_update = qedi_link_update,
        .schedule_recovery_handler = qedi_schedule_recovery_handler,
        .schedule_hw_err_handler = qedi_schedule_hw_err_handler,
        .get_protocol_tlv_data = qedi_get_protocol_tlv_data,
        .get_generic_tlv_data = qedi_get_generic_tlv_data,
    }

@@ -1357,7 +1409,7 @@ static int qedi_request_msix_irq(struct qedi_ctx *qedi)
    u16 idx;

    cpu = cpumask_first(cpu_online_mask);
    for (i = 0; i < qedi->int_info.msix_cnt; i++) {
    for (i = 0; i < qedi->msix_count; i++) {
        idx = i * qedi->dev_info.common.num_hwfns +
            qedi_ops->common->get_affin_hwfn_idx(qedi->cdev);

@@ -1387,7 +1439,12 @@ static int qedi_setup_int(struct qedi_ctx *qedi)
{
    int rc = 0;

    rc = qedi_ops->common->set_fp_int(qedi->cdev, num_online_cpus());
    rc = qedi_ops->common->set_fp_int(qedi->cdev, qedi->num_queues);
    if (rc < 0)
        goto exit_setup_int;

    qedi->msix_count = rc;

    rc = qedi_ops->common->get_fp_int(qedi->cdev, &qedi->int_info);
    if (rc)
        goto exit_setup_int;

@@ -2336,10 +2393,30 @@ static int qedi_setup_boot_info(struct qedi_ctx *qedi)
    return -ENOMEM;
}

static pci_ers_result_t qedi_io_error_detected(struct pci_dev *pdev,
        pci_channel_state_t state)
{
    struct qedi_ctx *qedi = pci_get_drvdata(pdev);

    QEDI_ERR(&qedi->dbg_ctx, "%s: PCI error detected [%d]\n",
        __func__, state);

    if (test_and_set_bit(QEDI_IN_RECOVERY, &qedi->flags)) {
        QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
            "Recovery already in progress.\n");
        return PCI_ERS_RESULT_NONE;
    }

    qedi_ops->common->recovery_process(qedi->cdev);

    return PCI_ERS_RESULT_CAN_RECOVER;
}

static void __qedi_remove(struct pci_dev *pdev, int mode)
{
    struct qedi_ctx *qedi = pci_get_drvdata(pdev);
    int rval;
    u16 retry = 10;

    if (mode == QEDI_MODE_SHUTDOWN)
        iscsi_host_for_each_session(qedi->shost,

@@ -2368,7 +2445,13 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
        qedi_sync_free_irqs(qedi);

        if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
            qedi_ops->stop(qedi->cdev);
            while (retry--) {
                rval = qedi_ops->stop(qedi->cdev);
                if (rval < 0)
                    msleep(1000);
                else
                    break;
            }
            qedi_ops->ll2->stop(qedi->cdev);
        }

@@ -2405,6 +2488,21 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
    }
}

static void qedi_board_disable_work(struct work_struct *work)
{
    struct qedi_ctx *qedi =
            container_of(work, struct qedi_ctx,
                     board_disable_work.work);

    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
        "Fan failure, Unloading firmware context.\n");

    if (test_and_set_bit(QEDI_IN_SHUTDOWN, &qedi->flags))
        return;

    __qedi_remove(qedi->pdev, QEDI_MODE_SHUTDOWN);
}

static void qedi_shutdown(struct pci_dev *pdev)
{
    struct qedi_ctx *qedi = pci_get_drvdata(pdev);

@@ -2427,6 +2525,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
    struct qed_probe_params qed_params;
    void *task_start, *task_end;
    int rc;
    u16 retry = 10;

    if (mode != QEDI_MODE_RECOVERY) {
        qedi = qedi_host_alloc(pdev);

@@ -2438,6 +2537,10 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
        qedi = pci_get_drvdata(pdev);
    }

retry_probe:
    if (mode == QEDI_MODE_RECOVERY)
        msleep(2000);

    memset(&qed_params, 0, sizeof(qed_params));
    qed_params.protocol = QED_PROTOCOL_ISCSI;
    qed_params.dp_module = qedi_qed_debug;

@@ -2445,11 +2548,20 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
    qed_params.is_vf = is_vf;
    qedi->cdev = qedi_ops->common->probe(pdev, &qed_params);
    if (!qedi->cdev) {
        if (mode == QEDI_MODE_RECOVERY && retry) {
            QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
                "Retry %d initialize hardware\n", retry);
            retry--;
            goto retry_probe;
        }

        rc = -ENODEV;
        QEDI_ERR(&qedi->dbg_ctx, "Cannot initialize hardware\n");
        goto free_host;
    }

    set_bit(QEDI_ERR_ATTN_CLR_EN, &qedi->qedi_err_flags);
    set_bit(QEDI_ERR_IS_RECOVERABLE, &qedi->qedi_err_flags);
    atomic_set(&qedi->link_state, QEDI_LINK_DOWN);

    rc = qedi_ops->fill_dev_info(qedi->cdev, &qedi->dev_info);

@@ -2533,7 +2645,7 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
    QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_DISC, "MAC address is %pM.\n",
        qedi->mac);

    sprintf(host_buf, "host_%d", qedi->shost->host_no);
    snprintf(host_buf, sizeof(host_buf), "host_%d", qedi->shost->host_no);
    qedi_ops->common->set_name(qedi->cdev, host_buf);

    qedi_ops->register_ops(qedi->cdev, &qedi_cb_ops, qedi);

@@ -2658,6 +2770,8 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
        }

        INIT_DELAYED_WORK(&qedi->recovery_work, qedi_recovery_handler);
        INIT_DELAYED_WORK(&qedi->board_disable_work,
                qedi_board_disable_work);

        /* F/w needs 1st task context memory entry for performance */
        set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map);

@@ -2744,12 +2858,17 @@ MODULE_DEVICE_TABLE(pci, qedi_pci_tbl);

static enum cpuhp_state qedi_cpuhp_state;

static struct pci_error_handlers qedi_err_handler = {
    .error_detected = qedi_io_error_detected,
};

static struct pci_driver qedi_pci_driver = {
    .name = QEDI_MODULE_NAME,
    .id_table = qedi_pci_tbl,
    .probe = qedi_probe,
    .remove = qedi_remove,
    .shutdown = qedi_shutdown,
    .err_handler = &qedi_err_handler,
};

static int __init qedi_init(void)

@@ -1241,7 +1241,7 @@ qla1280_done(struct scsi_qla_host *ha)
{
    struct srb *sp;
    struct list_head *done_q;
    int bus, target, lun;
    int bus, target;
    struct scsi_cmnd *cmd;

    ENTER("qla1280_done");

@@ -1256,7 +1256,6 @@ qla1280_done(struct scsi_qla_host *ha)
        cmd = sp->cmd;
        bus = SCSI_BUS_32(cmd);
        target = SCSI_TCN_32(cmd);
        lun = SCSI_LUN_32(cmd);

        switch ((CMD_RESULT(cmd) >> 16)) {
        case DID_RESET:

@@ -2185,13 +2184,12 @@ qla1280_nvram_config(struct scsi_qla_host *ha)
        nv->cntr_flags_1.disable_loading_risc_code;

    if (IS_ISP1040(ha)) {
        uint16_t hwrev, cfg1, cdma_conf, ddma_conf;
        uint16_t hwrev, cfg1, cdma_conf;

        hwrev = RD_REG_WORD(&reg->cfg_0) & ISP_CFG0_HWMSK;

        cfg1 = RD_REG_WORD(&reg->cfg_1) & ~(BIT_4 | BIT_5 | BIT_6);
        cdma_conf = RD_REG_WORD(&reg->cdma_cfg);
        ddma_conf = RD_REG_WORD(&reg->ddma_cfg);

        /* Busted fifo, says mjacob. */
        if (hwrev != ISP_CFG0_1040A)

@@ -2427,7 +2425,6 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
    int cnt;
    uint16_t *optr, *iptr;
    uint16_t __iomem *mptr;
    uint16_t data;
    DECLARE_COMPLETION_ONSTACK(wait);

    ENTER("qla1280_mailbox_command");

@@ -2462,7 +2459,7 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)

    spin_unlock_irq(ha->host->host_lock);
    WRT_REG_WORD(&reg->host_cmd, HC_SET_HOST_INT);
    data = qla1280_debounce_register(&reg->istatus);
    qla1280_debounce_register(&reg->istatus);

    wait_for_completion(&wait);
    del_timer_sync(&ha->mailbox_timer);

@@ -3604,7 +3601,6 @@ static void
qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
             struct list_head *done_q)
{
    unsigned int bus, target, lun;
    int sense_sz;
    struct srb *sp;
    struct scsi_cmnd *cmd;

@@ -3630,11 +3626,6 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,

    cmd = sp->cmd;

    /* Generate LU queue on cntrl, target, LUN */
    bus = SCSI_BUS_32(cmd);
    target = SCSI_TCN_32(cmd);
    lun = SCSI_LUN_32(cmd);

    if (comp_status || scsi_status) {
        dprintk(3, "scsi: comp_status = 0x%x, scsi_status = "
            "0x%x, handle = 0x%x\n", comp_status,

@@ -3673,7 +3664,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,

            dprintk(2, "qla1280_status_entry: Check "
                "condition Sense data, b %i, t %i, "
                "l %i\n", bus, target, lun);
                "l %i\n", SCSI_BUS_32(cmd), SCSI_TCN_32(cmd),
                SCSI_LUN_32(cmd));
            if (sense_sz)
                qla1280_dump_buffer(2,
                    (char *)cmd->sense_buffer,

@@ -157,6 +157,14 @@ qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
                vha->host_no);
        }
        break;
    case 10:
        if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
            ql_log(ql_log_info, vha, 0x70e9,
                "Issuing MPI firmware dump on host#%ld.\n",
                vha->host_no);
            ha->isp_ops->mpi_fw_dump(vha, 0);
        }
        break;
    }
    return count;
}

@@ -744,8 +752,6 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
            qla83xx_idc_audit(vha, IDC_AUDIT_TIMESTAMP);
            qla83xx_idc_unlock(vha, 0);
            break;
        } else if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
            qla27xx_reset_mpi(vha);
        } else {
            /* Make sure FC side is not in reset */
            WARN_ON_ONCE(qla2x00_wait_for_hba_online(vha) !=

@@ -2726,6 +2732,9 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
    struct link_statistics *stats;
    dma_addr_t stats_dma;
    struct fc_host_statistics *p = &vha->fc_host_stat;
    struct qla_qpair *qpair;
    int i;
    u64 ib = 0, ob = 0, ir = 0, or = 0;

    memset(p, -1, sizeof(*p));

@@ -2762,6 +2771,27 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
    if (rval != QLA_SUCCESS)
        goto done_free;

    /* --- */
    for (i = 0; i < vha->hw->max_qpairs; i++) {
        qpair = vha->hw->queue_pair_map[i];
        if (!qpair)
            continue;
        ir += qpair->counters.input_requests;
        or += qpair->counters.output_requests;
        ib += qpair->counters.input_bytes;
        ob += qpair->counters.output_bytes;
    }
    ir += ha->base_qpair->counters.input_requests;
    or += ha->base_qpair->counters.output_requests;
    ib += ha->base_qpair->counters.input_bytes;
    ob += ha->base_qpair->counters.output_bytes;

    ir += vha->qla_stats.input_requests;
    or += vha->qla_stats.output_requests;
    ib += vha->qla_stats.input_bytes;
    ob += vha->qla_stats.output_bytes;
    /* --- */

    p->link_failure_count = le32_to_cpu(stats->link_fail_cnt);
    p->loss_of_sync_count = le32_to_cpu(stats->loss_sync_cnt);
    p->loss_of_signal_count = le32_to_cpu(stats->loss_sig_cnt);

@@ -2781,15 +2811,16 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
            p->rx_words = le64_to_cpu(stats->fpm_recv_word_cnt);
            p->tx_words = le64_to_cpu(stats->fpm_xmit_word_cnt);
        } else {
            p->rx_words = vha->qla_stats.input_bytes;
            p->tx_words = vha->qla_stats.output_bytes;
            p->rx_words = ib >> 2;
            p->tx_words = ob >> 2;
        }
    }

    p->fcp_control_requests = vha->qla_stats.control_requests;
    p->fcp_input_requests = vha->qla_stats.input_requests;
    p->fcp_output_requests = vha->qla_stats.output_requests;
    p->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
    p->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
    p->fcp_input_requests = ir;
    p->fcp_output_requests = or;
    p->fcp_input_megabytes = ib >> 20;
    p->fcp_output_megabytes = ob >> 20;
    p->seconds_since_last_reset =
        get_jiffies_64() - vha->qla_stats.jiffies_at_last_reset;
    do_div(p->seconds_since_last_reset, HZ);

@@ -2809,9 +2840,18 @@ qla2x00_reset_host_stats(struct Scsi_Host *shost)
    struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
    struct link_statistics *stats;
    dma_addr_t stats_dma;
    int i;
    struct qla_qpair *qpair;

    memset(&vha->qla_stats, 0, sizeof(vha->qla_stats));
    memset(&vha->fc_host_stat, 0, sizeof(vha->fc_host_stat));
    for (i = 0; i < vha->hw->max_qpairs; i++) {
        qpair = vha->hw->queue_pair_map[i];
        if (!qpair)
            continue;
        memset(&qpair->counters, 0, sizeof(qpair->counters));
    }
    memset(&ha->base_qpair->counters, 0, sizeof(qpair->counters));

    vha->qla_stats.jiffies_at_last_reset = get_jiffies_64();

@@ -3214,46 +3254,7 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
    fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
    fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;

    if (IS_CNA_CAPABLE(ha))
        speeds = FC_PORTSPEED_10GBIT;
    else if (IS_QLA28XX(ha) || IS_QLA27XX(ha)) {
        if (ha->max_supported_speed == 2) {
            if (ha->min_supported_speed <= 6)
                speeds |= FC_PORTSPEED_64GBIT;
        }
        if (ha->max_supported_speed == 2 ||
            ha->max_supported_speed == 1) {
            if (ha->min_supported_speed <= 5)
                speeds |= FC_PORTSPEED_32GBIT;
        }
        if (ha->max_supported_speed == 2 ||
            ha->max_supported_speed == 1 ||
            ha->max_supported_speed == 0) {
            if (ha->min_supported_speed <= 4)
                speeds |= FC_PORTSPEED_16GBIT;
        }
        if (ha->max_supported_speed == 1 ||
            ha->max_supported_speed == 0) {
            if (ha->min_supported_speed <= 3)
                speeds |= FC_PORTSPEED_8GBIT;
        }
        if (ha->max_supported_speed == 0) {
            if (ha->min_supported_speed <= 2)
                speeds |= FC_PORTSPEED_4GBIT;
        }
    } else if (IS_QLA2031(ha))
        speeds = FC_PORTSPEED_16GBIT|FC_PORTSPEED_8GBIT|
            FC_PORTSPEED_4GBIT;
    else if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
        speeds = FC_PORTSPEED_8GBIT|FC_PORTSPEED_4GBIT|
            FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
    else if (IS_QLA24XX_TYPE(ha))
        speeds = FC_PORTSPEED_4GBIT|FC_PORTSPEED_2GBIT|
            FC_PORTSPEED_1GBIT;
    else if (IS_QLA23XX(ha))
        speeds = FC_PORTSPEED_2GBIT|FC_PORTSPEED_1GBIT;
    else
        speeds = FC_PORTSPEED_1GBIT;
    speeds = qla25xx_fdmi_port_speed_capability(ha);

    fc_host_supported_speeds(vha->host) = speeds;
}

@@ -16,7 +16,7 @@
 * | Device Discovery             | 0x2134     | 0x210e-0x2116 |
 * |                              |            | 0x211a        |
 * |                              |            | 0x211c-0x2128 |
 * |                              |            | 0x212a-0x2134 |
 * |                              |            | 0x212c-0x2134 |
 * | Queue Command and IO tracing | 0x3074     | 0x300b        |
 * |                              |            | 0x3027-0x3028 |
 * |                              |            | 0x303d-0x3041 |

@@ -2449,7 +2449,7 @@ static void ql_dbg_prefix(char *pbuf, int pbuf_size,
        const struct pci_dev *pdev = vha->hw->pdev;

        /* <module-name> [<dev-name>]-<msg-id>:<host>: */
        snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%ld: ", QL_MSGHDR,
        snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
            dev_name(&(pdev->dev)), msg_id, vha->host_no);
    } else {
        /* <module-name> [<dev-name>]-<msg-id>: : */

@@ -624,6 +624,12 @@ enum {
    TYPE_TGT_TMCMD,     /* task management */
};

struct iocb_resource {
    u8 res_type;
    u8 pad;
    u16 iocb_cnt;
};

typedef struct srb {
    /*
     * Do not move cmd_type field, it needs to

@@ -631,6 +637,7 @@ typedef struct srb {
     */
    uint8_t cmd_type;
    uint8_t pad[3];
    struct iocb_resource iores;
    struct kref cmd_kref;   /* need to migrate ref_count over to this */
    void *priv;
    wait_queue_head_t nvme_ls_waitq;

@@ -2443,12 +2450,6 @@ typedef struct fc_port {
    struct list_head list;
    struct scsi_qla_host *vha;

    uint8_t node_name[WWN_SIZE];
    uint8_t port_name[WWN_SIZE];
    port_id_t d_id;
    uint16_t loop_id;
    uint16_t old_loop_id;

    unsigned int conf_compl_supported:1;
    unsigned int deleted:2;
    unsigned int free_pending:1;

@@ -2465,15 +2466,24 @@ typedef struct fc_port {
    unsigned int n2n_flag:1;
    unsigned int explicit_logout:1;
    unsigned int prli_pend_timer:1;
    uint8_t nvme_flag;

    uint8_t node_name[WWN_SIZE];
    uint8_t port_name[WWN_SIZE];
    port_id_t d_id;
    uint16_t loop_id;
    uint16_t old_loop_id;

    struct completion nvme_del_done;
    uint32_t nvme_prli_service_param;
#define NVME_PRLI_SP_PI_CTRL    BIT_9
#define NVME_PRLI_SP_SLER       BIT_8
#define NVME_PRLI_SP_CONF       BIT_7
#define NVME_PRLI_SP_INITIATOR  BIT_5
#define NVME_PRLI_SP_TARGET     BIT_4
#define NVME_PRLI_SP_DISCOVERY  BIT_3
#define NVME_PRLI_SP_FIRST_BURST BIT_0
    uint8_t nvme_flag;

    uint32_t nvme_first_burst_size;
#define NVME_FLAG_REGISTERED    4
#define NVME_FLAG_DELETING      2

@@ -2544,6 +2554,8 @@ typedef struct fc_port {
    u8 last_login_state;
    u16 n2n_link_reset_cnt;
    u16 n2n_chip_reset;

    struct dentry *dfs_rport_dir;
} fc_port_t;

enum {

@@ -3508,6 +3520,14 @@ struct qla_tgt_counters {
    uint64_t num_term_xchg_sent;
};

struct qla_counters {
    uint64_t input_bytes;
    uint64_t input_requests;
    uint64_t output_bytes;
    uint64_t output_requests;
};

struct qla_qpair;

/* Response queue data structure */

@@ -3566,6 +3586,15 @@ struct req_que {
    uint8_t req_pkt[REQUEST_ENTRY_SIZE];
};

struct qla_fw_resources {
    u16 iocbs_total;
    u16 iocbs_limit;
    u16 iocbs_qp_limit;
    u16 iocbs_used;
};

#define QLA_IOCB_PCT_LIMIT 95

/*Queue pair data structure */
struct qla_qpair {
    spinlock_t qp_lock;

@@ -3592,6 +3621,7 @@ struct qla_qpair {
        uint32_t enable_class_2:1;
        uint32_t enable_explicit_conf:1;
        uint32_t use_shadow_reg:1;
        uint32_t rcv_intr:1;

    uint16_t id;        /* qp number used with FW */
    uint16_t vp_idx;    /* vport ID */

@@ -3607,13 +3637,17 @@ struct qla_qpair {
    struct qla_msix_entry *msix;    /* point to &ha->msix_entries[x] */
    struct qla_hw_data *hw;
    struct work_struct q_work;
    struct qla_counters counters;

    struct list_head qp_list_elem;  /* vha->qp_list */
    struct list_head hints_list;
    uint16_t cpuid;

    uint16_t retry_term_cnt;
    __le32 retry_term_exchg_addr;
    uint64_t retry_term_jiff;
    struct qla_tgt_counters tgt_counters;
    uint16_t cpuid;
    struct qla_fw_resources fwres ____cacheline_aligned;
};

/* Place holder for FW buffer parameters */

@@ -3881,6 +3915,7 @@ struct qla_hw_data {
        /* Enabled in Driver */
        uint32_t scm_enabled:1;
        uint32_t max_req_queue_warned:1;
        uint32_t plogi_template_valid:1;
    } flags;

    uint16_t max_exchg;

@@ -4127,6 +4162,10 @@ struct qla_hw_data {
#define USE_ASYNC_SCAN(ha) (IS_QLA25XX(ha) || IS_QLA81XX(ha) ||\
    IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))

#define IS_ZIO_THRESHOLD_CAPABLE(ha) \
    ((IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha)) &&\
     (ha->zio_mode == QLA_ZIO_MODE_6))

    /* HBA serial number */
    uint8_t serial0;
    uint8_t serial1;

@@ -4214,7 +4253,7 @@ struct qla_hw_data {
    /* Extended Logins  */
    void *exlogin_buf;
    dma_addr_t exlogin_buf_dma;
    int exlogin_size;
    uint32_t exlogin_size;

#define ENABLE_EXCHANGE_OFFLD   BIT_2

@@ -4225,7 +4264,8 @@ struct qla_hw_data {
    int exchoffld_count;

    /* n2n */
    struct els_plogi_payload plogi_els_payld;
    struct fc_els_flogi plogi_els_payld;
#define LOGIN_TEMPLATE_SIZE (sizeof(struct fc_els_flogi) - 4)

    void *swl;

@@ -4273,6 +4313,7 @@ struct qla_hw_data {
#define FW_ATTR_EXT0_SCM_BROCADE    0x00001000
    /* Cisco fabric attached */
#define FW_ATTR_EXT0_SCM_CISCO      0x00002000
#define FW_ATTR_EXT0_NVME2          BIT_13
    uint16_t fw_attributes_ext[2];
    uint32_t fw_memory_size;
    uint32_t fw_transfer_size;

@@ -4622,6 +4663,7 @@ typedef struct scsi_qla_host {
        uint32_t qpairs_rsp_created:1;
        uint32_t nvme_enabled:1;
        uint32_t nvme_first_burst:1;
        uint32_t nvme2_enabled:1;
    } flags;

    atomic_t loop_state;

@@ -4780,6 +4822,8 @@ typedef struct scsi_qla_host {
    uint16_t ql2xexchoffld;
    uint16_t ql2xiniexchg;

    struct dentry *dfs_rport_root;

    struct purex_list {
        struct list_head head;
        spinlock_t lock;

@@ -5103,6 +5147,8 @@ struct sff_8247_a0 {
    ha->current_topology == ISP_CFG_N || \
    !ha->current_topology)

#define QLA_N2N_WAIT_TIME   5 /* 2 * ra_tov(n2n) + 1 */

#define NVME_TYPE(fcport) \
    (fcport->fc4_type & FS_FC4TYPE_NVME) \

@@ -12,6 +12,140 @@
static struct dentry *qla2x00_dfs_root;
static atomic_t qla2x00_dfs_root_count;

#define QLA_DFS_RPORT_DEVLOSS_TMO   1

static int
qla_dfs_rport_get(struct fc_port *fp, int attr_id, u64 *val)
{
    switch (attr_id) {
    case QLA_DFS_RPORT_DEVLOSS_TMO:
        /* Only supported for FC-NVMe devices that are registered. */
        if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
            return -EIO;
        *val = fp->nvme_remote_port->dev_loss_tmo;
        break;
    default:
        return -EINVAL;
    }
    return 0;
}

static int
qla_dfs_rport_set(struct fc_port *fp, int attr_id, u64 val)
{
    switch (attr_id) {
    case QLA_DFS_RPORT_DEVLOSS_TMO:
        /* Only supported for FC-NVMe devices that are registered. */
        if (!(fp->nvme_flag & NVME_FLAG_REGISTERED))
            return -EIO;
#if (IS_ENABLED(CONFIG_NVME_FC))
        return nvme_fc_set_remoteport_devloss(fp->nvme_remote_port,
                val);
#else /* CONFIG_NVME_FC */
        return -EINVAL;
#endif /* CONFIG_NVME_FC */
    default:
        return -EINVAL;
    }
    return 0;
}

#define DEFINE_QLA_DFS_RPORT_RW_ATTR(_attr_id, _attr)       \
static int qla_dfs_rport_##_attr##_get(void *data, u64 *val)    \
{                               \
    struct fc_port *fp = data;              \
    return qla_dfs_rport_get(fp, _attr_id, val);        \
}                               \
static int qla_dfs_rport_##_attr##_set(void *data, u64 val) \
{                               \
    struct fc_port *fp = data;              \
    return qla_dfs_rport_set(fp, _attr_id, val);        \
}                               \
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_##_attr##_fops,      \
        qla_dfs_rport_##_attr##_get,            \
        qla_dfs_rport_##_attr##_set, "%llu\n")

/*
 * Wrapper for getting fc_port fields.
 *
 * _attr    : Attribute name.
 * _get_val : Accessor macro to retrieve the value.
 */
#define DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)     \
static int qla_dfs_rport_field_##_attr##_get(void *data, u64 *val) \
{                               \
    struct fc_port *fp = data;              \
    *val = _get_val;                    \
    return 0;                       \
}                               \
DEFINE_DEBUGFS_ATTRIBUTE(qla_dfs_rport_field_##_attr##_fops,    \
        qla_dfs_rport_field_##_attr##_get,      \
        NULL, "%llu\n")

#define DEFINE_QLA_DFS_RPORT_ACCESS(_attr, _get_val)        \
    DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, _get_val)

#define DEFINE_QLA_DFS_RPORT_FIELD(_attr)           \
    DEFINE_QLA_DFS_RPORT_FIELD_GET(_attr, fp->_attr)

DEFINE_QLA_DFS_RPORT_RW_ATTR(QLA_DFS_RPORT_DEVLOSS_TMO, dev_loss_tmo);

DEFINE_QLA_DFS_RPORT_FIELD(disc_state);
DEFINE_QLA_DFS_RPORT_FIELD(scan_state);
DEFINE_QLA_DFS_RPORT_FIELD(fw_login_state);
DEFINE_QLA_DFS_RPORT_FIELD(login_pause);
DEFINE_QLA_DFS_RPORT_FIELD(flags);
DEFINE_QLA_DFS_RPORT_FIELD(nvme_flag);
DEFINE_QLA_DFS_RPORT_FIELD(last_rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(rscn_gen);
DEFINE_QLA_DFS_RPORT_FIELD(login_gen);
DEFINE_QLA_DFS_RPORT_FIELD(loop_id);
DEFINE_QLA_DFS_RPORT_FIELD_GET(port_id, fp->d_id.b24);
DEFINE_QLA_DFS_RPORT_FIELD_GET(sess_kref, kref_read(&fp->sess_kref));

void
qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp)
{
    char wwn[32];

#define QLA_CREATE_RPORT_FIELD_ATTR(_attr)          \
    debugfs_create_file(#_attr, 0400, fp->dfs_rport_dir,    \
        fp, &qla_dfs_rport_field_##_attr##_fops)

    if (!vha->dfs_rport_root || fp->dfs_rport_dir)
        return;

    sprintf(wwn, "pn-%016llx", wwn_to_u64(fp->port_name));
    fp->dfs_rport_dir = debugfs_create_dir(wwn, vha->dfs_rport_root);
    if (!fp->dfs_rport_dir)
        return;
    if (NVME_TARGET(vha->hw, fp))
        debugfs_create_file("dev_loss_tmo", 0600, fp->dfs_rport_dir,
            fp, &qla_dfs_rport_dev_loss_tmo_fops);

    QLA_CREATE_RPORT_FIELD_ATTR(disc_state);
    QLA_CREATE_RPORT_FIELD_ATTR(scan_state);
    QLA_CREATE_RPORT_FIELD_ATTR(fw_login_state);
    QLA_CREATE_RPORT_FIELD_ATTR(login_pause);
    QLA_CREATE_RPORT_FIELD_ATTR(flags);
    QLA_CREATE_RPORT_FIELD_ATTR(nvme_flag);
    QLA_CREATE_RPORT_FIELD_ATTR(last_rscn_gen);
    QLA_CREATE_RPORT_FIELD_ATTR(rscn_gen);
    QLA_CREATE_RPORT_FIELD_ATTR(login_gen);
    QLA_CREATE_RPORT_FIELD_ATTR(loop_id);
    QLA_CREATE_RPORT_FIELD_ATTR(port_id);
    QLA_CREATE_RPORT_FIELD_ATTR(sess_kref);
}

void
qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp)
{
    if (!vha->dfs_rport_root || !fp->dfs_rport_dir)
        return;
    debugfs_remove_recursive(fp->dfs_rport_dir);
    fp->dfs_rport_dir = NULL;
}

static int
qla2x00_dfs_tgt_sess_show(struct seq_file *s, void *unused)
{

@@ -57,52 +191,52 @@ qla2x00_dfs_tgt_port_database_show(struct seq_file *s, void *unused)
{
    scsi_qla_host_t *vha = s->private;
    struct qla_hw_data *ha = vha->hw;
    struct gid_list_info *gid_list, *gid;
    struct gid_list_info *gid_list;
    dma_addr_t gid_list_dma;
    fc_port_t fc_port;
    char *id_iter;
    int rc, i;
    uint16_t entries, loop_id;
    struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;

    seq_printf(s, "%s\n", vha->host_str);
    if (tgt) {
        gid_list = dma_alloc_coherent(&ha->pdev->dev,
                qla2x00_gid_list_size(ha),
                &gid_list_dma, GFP_KERNEL);
        if (!gid_list) {
            ql_dbg(ql_dbg_user, vha, 0x7018,
                "DMA allocation failed for %u\n",
                qla2x00_gid_list_size(ha));
            return 0;
        }

        rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
                &entries);
        if (rc != QLA_SUCCESS)
            goto out_free_id_list;

        gid = gid_list;

        seq_puts(s, "Port Name  Port ID         Loop ID\n");

        for (i = 0; i < entries; i++) {
            loop_id = le16_to_cpu(gid->loop_id);
            memset(&fc_port, 0, sizeof(fc_port_t));

            fc_port.loop_id = loop_id;

            rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
            seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
                fc_port.port_name, fc_port.d_id.b.domain,
                fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
                fc_port.loop_id);
            gid = (void *)gid + ha->gid_list_info_size;
        }
out_free_id_list:
        dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
            gid_list, gid_list_dma);
    gid_list = dma_alloc_coherent(&ha->pdev->dev,
            qla2x00_gid_list_size(ha),
            &gid_list_dma, GFP_KERNEL);
    if (!gid_list) {
        ql_dbg(ql_dbg_user, vha, 0x7018,
            "DMA allocation failed for %u\n",
            qla2x00_gid_list_size(ha));
        return 0;
    }

    rc = qla24xx_gidlist_wait(vha, gid_list, gid_list_dma,
            &entries);
    if (rc != QLA_SUCCESS)
        goto out_free_id_list;

    id_iter = (char *)gid_list;

    seq_puts(s, "Port Name  Port ID         Loop ID\n");

    for (i = 0; i < entries; i++) {
        struct gid_list_info *gid =
            (struct gid_list_info *)id_iter;
        loop_id = le16_to_cpu(gid->loop_id);
        memset(&fc_port, 0, sizeof(fc_port_t));

        fc_port.loop_id = loop_id;

        rc = qla24xx_gpdb_wait(vha, &fc_port, 0);
        seq_printf(s, "%8phC  %02x%02x%02x  %d\n",
            fc_port.port_name, fc_port.d_id.b.domain,
            fc_port.d_id.b.area, fc_port.d_id.b.al_pa,
            fc_port.loop_id);
        id_iter += ha->gid_list_info_size;
    }
out_free_id_list:
    dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
        gid_list, gid_list_dma);

    return 0;
}

@@ -127,6 +261,8 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
    struct scsi_qla_host *vha = s->private;
    uint16_t mb[MAX_IOCB_MB_REG];
    int rc;
    struct qla_hw_data *ha = vha->hw;
    u16 iocbs_used, i;

    rc = qla24xx_res_count_wait(vha, mb, SIZEOF_IOCB_MB_REG);
    if (rc != QLA_SUCCESS) {

@@ -151,6 +287,18 @@ qla_dfs_fw_resource_cnt_show(struct seq_file *s, void *unused)
            mb[23]);
    }

    if (ql2xenforce_iocb_limit) {
        /* lock is not require. It's an estimate. */
        iocbs_used = ha->base_qpair->fwres.iocbs_used;
        for (i = 0; i < ha->max_qpairs; i++) {
            if (ha->queue_pair_map[i])
                iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
        }

        seq_printf(s, "Driver: estimate iocb used [%d] high water limit [%d]\n",
            iocbs_used, ha->base_qpair->fwres.iocbs_limit);
    }

    return 0;
}

@@ -473,9 +621,21 @@ qla2x00_dfs_setup(scsi_qla_host_t *vha)
    ha->tgt.dfs_tgt_sess = debugfs_create_file("tgt_sess",
        S_IRUSR, ha->dfs_dir, vha, &dfs_tgt_sess_ops);

    if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha))
    if (IS_QLA27XX(ha) || IS_QLA83XX(ha) || IS_QLA28XX(ha)) {
        ha->tgt.dfs_naqp = debugfs_create_file("naqp",
            0400, ha->dfs_dir, vha, &dfs_naqp_ops);
        if (!ha->tgt.dfs_naqp) {
            ql_log(ql_log_warn, vha, 0xd011,
                "Unable to create debugFS naqp node.\n");
            goto out;
        }
    }
    vha->dfs_rport_root = debugfs_create_dir("rports", ha->dfs_dir);
    if (!vha->dfs_rport_root) {
        ql_log(ql_log_warn, vha, 0xd012,
            "Unable to create debugFS rports node.\n");
        goto out;
    }
out:
    return 0;
}

@@ -515,6 +675,11 @@ qla2x00_dfs_remove(scsi_qla_host_t *vha)
        ha->dfs_fce = NULL;
    }

    if (vha->dfs_rport_root) {
        debugfs_remove_recursive(vha->dfs_rport_root);
        vha->dfs_rport_root = NULL;
    }

    if (ha->dfs_dir) {
        debugfs_remove(ha->dfs_dir);
        ha->dfs_dir = NULL;

@@ -619,7 +619,7 @@ struct sts_entry_24xx {
#define SF_NVME_ERSP        BIT_6
#define SF_FCP_RSP_DMA      BIT_0

    __le16 retry_delay;
    __le16 status_qualifier;
    __le16 scsi_status;     /* SCSI status. */
#define SS_CONFIRMATION_REQ     BIT_12

@@ -129,6 +129,8 @@ int qla2x00_reserve_mgmt_server_loop_id(scsi_qla_host_t *);
void qla_rscn_replay(fc_port_t *fcport);
void qla24xx_free_purex_item(struct purex_item *item);
extern bool qla24xx_risc_firmware_invalid(uint32_t *);
void qla_init_iocb_limit(scsi_qla_host_t *);


/*
 * Global Data in qla_os.c source file.

@@ -175,6 +177,7 @@ extern int qla2xuseresexchforels;
extern int ql2xexlogins;
extern int ql2xdifbundlinginternalbuffers;
extern int ql2xfulldump_on_mpifail;
extern int ql2xenforce_iocb_limit;

extern int qla2x00_loop_reset(scsi_qla_host_t *);
extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);

@@ -704,6 +707,8 @@ int qla24xx_async_gfpnid(scsi_qla_host_t *, fc_port_t *);
void qla24xx_handle_gfpnid_event(scsi_qla_host_t *, struct event_arg *);
void qla24xx_sp_unmap(scsi_qla_host_t *, srb_t *);
void qla_scan_work_fn(struct work_struct *);
uint qla25xx_fdmi_port_speed_capability(struct qla_hw_data *);
uint qla25xx_fdmi_port_speed_currently(struct qla_hw_data *);

/*
 * Global Function Prototypes in qla_attr.c source file.

@@ -935,9 +940,10 @@ void qlt_clr_qp_table(struct scsi_qla_host *vha);
void qlt_set_mode(struct scsi_qla_host *);
int qla2x00_set_data_rate(scsi_qla_host_t *vha, uint16_t mode);
extern void qla24xx_process_purex_list(struct purex_list *);
extern void qla2x00_dfs_create_rport(scsi_qla_host_t *vha, struct fc_port *fp);
extern void qla2x00_dfs_remove_rport(scsi_qla_host_t *vha, struct fc_port *fp);

/* nvme.c */
void qla_nvme_unregister_remote_port(struct fc_port *fcport);
void qla27xx_reset_mpi(scsi_qla_host_t *vha);
void qla_handle_els_plogi_done(scsi_qla_host_t *vha, struct event_arg *ea);
#endif /* _QLA_GBL_H */

@@ -1502,7 +1502,7 @@ qla2x00_prep_ct_fdmi_req(struct ct_sns_pkt *p, uint16_t cmd,
     return &p->p.req;
 }

-static uint
+uint
 qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
 {
     uint speeds = 0;

@@ -1546,7 +1546,7 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
         }
         return speeds;
     }
-    if (IS_QLA25XX(ha))
+    if (IS_QLA25XX(ha) || IS_QLAFX00(ha))
         return FDMI_PORT_SPEED_8GB|FDMI_PORT_SPEED_4GB|
             FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
     if (IS_QLA24XX_TYPE(ha))

@@ -1556,7 +1556,8 @@ qla25xx_fdmi_port_speed_capability(struct qla_hw_data *ha)
         return FDMI_PORT_SPEED_2GB|FDMI_PORT_SPEED_1GB;
     return FDMI_PORT_SPEED_1GB;
 }
-static uint
+
+uint
 qla25xx_fdmi_port_speed_currently(struct qla_hw_data *ha)
 {
     switch (ha->link_data_rate) {
@@ -63,6 +63,16 @@ void qla2x00_sp_free(srb_t *sp)
     qla2x00_rel_sp(sp);
 }

+void qla2xxx_rel_done_warning(srb_t *sp, int res)
+{
+    WARN_ONCE(1, "Calling done() of an already freed srb %p object\n", sp);
+}
+
+void qla2xxx_rel_free_warning(srb_t *sp)
+{
+    WARN_ONCE(1, "Calling free() of an already freed srb %p object\n", sp);
+}
+
 /* Asynchronous Login/Logout Routines -------------------------------------- */

 unsigned long

@@ -3288,6 +3298,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                 j, fwdt->dump_size);
             dump_size += fwdt->dump_size;
         }
+        /* Add space for spare MPI fw dump. */
+        dump_size += ha->fwdt[1].dump_size;
     } else {
         req_q_size = req->length * sizeof(request_t);
         rsp_q_size = rsp->length * sizeof(response_t);

@@ -3622,6 +3634,31 @@ qla24xx_detect_sfp(scsi_qla_host_t *vha)
     return ha->flags.lr_detected;
 }

+void qla_init_iocb_limit(scsi_qla_host_t *vha)
+{
+    u16 i, num_qps;
+    u32 limit;
+    struct qla_hw_data *ha = vha->hw;
+
+    num_qps = ha->num_qpairs + 1;
+    limit = (ha->orig_fw_iocb_count * QLA_IOCB_PCT_LIMIT) / 100;
+
+    ha->base_qpair->fwres.iocbs_total = ha->orig_fw_iocb_count;
+    ha->base_qpair->fwres.iocbs_limit = limit;
+    ha->base_qpair->fwres.iocbs_qp_limit = limit / num_qps;
+    ha->base_qpair->fwres.iocbs_used = 0;
+    for (i = 0; i < ha->max_qpairs; i++) {
+        if (ha->queue_pair_map[i]) {
+            ha->queue_pair_map[i]->fwres.iocbs_total =
+                ha->orig_fw_iocb_count;
+            ha->queue_pair_map[i]->fwres.iocbs_limit = limit;
+            ha->queue_pair_map[i]->fwres.iocbs_qp_limit =
+                limit / num_qps;
+            ha->queue_pair_map[i]->fwres.iocbs_used = 0;
+        }
+    }
+}
+
 /**
  * qla2x00_setup_chip() - Load and start RISC firmware.
  * @vha: HA context

@@ -3690,9 +3727,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
             goto execute_fw_with_lr;
         }

-        if ((IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
-            IS_QLA28XX(ha)) &&
-            (ha->zio_mode == QLA_ZIO_MODE_6))
+        if (IS_ZIO_THRESHOLD_CAPABLE(ha))
             qla27xx_set_zio_threshold(vha,
                 ha->last_zio_threshold);

@@ -3723,6 +3758,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
                 MIN_MULTI_ID_FABRIC - 1;
     }
     qla2x00_get_resource_cnts(vha);
+    qla_init_iocb_limit(vha);

     /*
      * Allocate the array of outstanding commands

@@ -4957,6 +4993,29 @@ qla2x00_free_fcport(fc_port_t *fcport)
     kfree(fcport);
 }

+static void qla_get_login_template(scsi_qla_host_t *vha)
+{
+    struct qla_hw_data *ha = vha->hw;
+    int rval;
+    u32 *bp, sz;
+    __be32 *q;
+
+    memset(ha->init_cb, 0, ha->init_cb_size);
+    sz = min_t(int, sizeof(struct fc_els_flogi), ha->init_cb_size);
+    rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
+        ha->init_cb, sz);
+    if (rval != QLA_SUCCESS) {
+        ql_dbg(ql_dbg_init, vha, 0x00d1,
+            "PLOGI ELS param read fail.\n");
+        return;
+    }
+    q = (__be32 *)&ha->plogi_els_payld.fl_csp;
+
+    bp = (uint32_t *)ha->init_cb;
+    cpu_to_be32_array(q, bp, sz / 4);
+    ha->flags.plogi_template_valid = 1;
+}
+
 /*
  * qla2x00_configure_loop
  *      Updates Fibre Channel Device Database with what is actually on loop.

@@ -5000,6 +5059,7 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
     clear_bit(RSCN_UPDATE, &vha->dpc_flags);

     qla2x00_get_data_rate(vha);
+    qla_get_login_template(vha);

     /* Determine what we need to do */
     if ((ha->current_topology == ISP_CFG_FL ||

@@ -5084,32 +5144,11 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)

 static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
 {
-    struct qla_hw_data *ha = vha->hw;
     unsigned long flags;
     fc_port_t *fcport;
-    int rval;
-
-    if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags)) {
-        /* borrowing */
-        u32 *bp, sz;
-
-        memset(ha->init_cb, 0, ha->init_cb_size);
-        sz = min_t(int, sizeof(struct els_plogi_payload),
-            ha->init_cb_size);
-        rval = qla24xx_get_port_login_templ(vha, ha->init_cb_dma,
-            ha->init_cb, sz);
-        if (rval == QLA_SUCCESS) {
-            __be32 *q = &ha->plogi_els_payld.data[0];
-
-            bp = (uint32_t *)ha->init_cb;
-            cpu_to_be32_array(q, bp, sz / 4);
-            memcpy(bp, q, sizeof(ha->plogi_els_payld.data));
-        } else {
-            ql_dbg(ql_dbg_init, vha, 0x00d1,
-                "PLOGI ELS param read fail.\n");
-            goto skip_login;
-        }
-    }
+
+    if (test_and_clear_bit(N2N_LOGIN_NEEDED, &vha->dpc_flags))
+        set_bit(RELOGIN_NEEDED, &vha->dpc_flags);

     list_for_each_entry(fcport, &vha->vp_fcports, list) {
         if (fcport->n2n_flag) {

@@ -5118,7 +5157,6 @@ static int qla2x00_configure_n2n_loop(scsi_qla_host_t *vha)
         }
     }

-skip_login:
     spin_lock_irqsave(&vha->work_lock, flags);
     vha->scan.scan_retry++;
     spin_unlock_irqrestore(&vha->work_lock, flags);

@@ -5486,6 +5524,8 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)

     qla2x00_iidma_fcport(vha, fcport);

+    qla2x00_dfs_create_rport(vha, fcport);
+
     if (NVME_TARGET(vha->hw, fcport)) {
         qla_nvme_register_remote(vha, fcport);
         qla2x00_set_fcport_disc_state(fcport, DSC_LOGIN_COMPLETE);

@@ -7109,10 +7149,9 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
     unsigned long flags = 0;
     struct qla_hw_data *ha = vha->hw;
     struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
-    int rval = QLA_SUCCESS;

     if (IS_P3P_TYPE(ha))
-        return rval;
+        return QLA_SUCCESS;

     vha->flags.online = 0;
     ha->isp_ops->disable_intrs(ha);

@@ -7127,7 +7166,7 @@ qla24xx_reset_adapter(scsi_qla_host_t *vha)
     if (IS_NOPOLLING_TYPE(ha))
         ha->isp_ops->enable_intrs(ha);

-    return rval;
+    return QLA_SUCCESS;
 }

 /* On sparc systems, obtain port and node WWN from firmware
@@ -207,10 +207,15 @@ qla2xxx_get_qpair_sp(scsi_qla_host_t *vha, struct qla_qpair *qpair,
     return sp;
 }

+void qla2xxx_rel_done_warning(srb_t *sp, int res);
+void qla2xxx_rel_free_warning(srb_t *sp);
+
 static inline void
 qla2xxx_rel_qpair_sp(struct qla_qpair *qpair, srb_t *sp)
 {
     sp->qpair = NULL;
+    sp->done = qla2xxx_rel_done_warning;
+    sp->free = qla2xxx_rel_free_warning;
     mempool_free(sp, qpair->srb_mempool);
     QLA_QPAIR_MARK_NOT_BUSY(qpair);
 }

@@ -266,11 +271,41 @@ qla2x00_handle_mbx_completion(struct qla_hw_data *ha, int status)
 }

 static inline void
-qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
+qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t sts_qual)
 {
-    if (retry_delay)
-        fcport->retry_delay_timestamp = jiffies +
-            (retry_delay * HZ / 10);
+    u8 scope;
+    u16 qual;
+#define SQ_SCOPE_MASK        0xc000 /* SAM-6 rev5 5.3.2 */
+#define SQ_SCOPE_SHIFT        14
+#define SQ_QUAL_MASK        0x3fff
+
+#define SQ_MAX_WAIT_SEC        60 /* Max I/O hold off time in seconds. */
+#define SQ_MAX_WAIT_TIME    (SQ_MAX_WAIT_SEC * 10) /* in 100ms. */
+
+    if (!sts_qual) /* Common case. */
+        return;
+
+    scope = (sts_qual & SQ_SCOPE_MASK) >> SQ_SCOPE_SHIFT;
+    /* Handle only scope 1 or 2, which is for I-T nexus. */
+    if (scope != 1 && scope != 2)
+        return;
+
+    /* Skip processing, if retry delay timer is already in effect. */
+    if (fcport->retry_delay_timestamp &&
+        time_before(jiffies, fcport->retry_delay_timestamp))
+        return;
+
+    qual = sts_qual & SQ_QUAL_MASK;
+    if (qual < 1 || qual > 0x3fef)
+        return;
+    qual = min(qual, (u16)SQ_MAX_WAIT_TIME);
+
+    /* qual is expressed in 100ms increments. */
+    fcport->retry_delay_timestamp = jiffies + (qual * HZ / 10);
+
+    ql_log(ql_log_warn, fcport->vha, 0x5101,
+        "%8phC: I/O throttling requested (status qualifier = %04xh), holding off I/Os for %ums.\n",
+        fcport->port_name, sts_qual, qual * 100);
 }

 static inline bool

@@ -343,3 +378,58 @@ qla2xxx_get_fc4_priority(struct scsi_qla_host *vha)

     return (data >> 6) & BIT_0 ? FC4_PRIORITY_FCP : FC4_PRIORITY_NVME;
 }

+enum {
+    RESOURCE_NONE,
+    RESOURCE_INI,
+};
+
+static inline int
+qla_get_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+    u16 iocbs_used, i;
+    struct qla_hw_data *ha = qp->vha->hw;
+
+    if (!ql2xenforce_iocb_limit) {
+        iores->res_type = RESOURCE_NONE;
+        return 0;
+    }
+
+    if ((iores->iocb_cnt + qp->fwres.iocbs_used) < qp->fwres.iocbs_qp_limit) {
+        qp->fwres.iocbs_used += iores->iocb_cnt;
+        return 0;
+    } else {
+        /* no need to acquire qpair lock. It's just rough calculation */
+        iocbs_used = ha->base_qpair->fwres.iocbs_used;
+        for (i = 0; i < ha->max_qpairs; i++) {
+            if (ha->queue_pair_map[i])
+                iocbs_used += ha->queue_pair_map[i]->fwres.iocbs_used;
+        }
+
+        if ((iores->iocb_cnt + iocbs_used) < qp->fwres.iocbs_limit) {
+            qp->fwres.iocbs_used += iores->iocb_cnt;
+            return 0;
+        } else {
+            iores->res_type = RESOURCE_NONE;
+            return -ENOSPC;
+        }
+    }
+}
+
+static inline void
+qla_put_iocbs(struct qla_qpair *qp, struct iocb_resource *iores)
+{
+    switch (iores->res_type) {
+    case RESOURCE_NONE:
+        break;
+    default:
+        if (qp->fwres.iocbs_used >= iores->iocb_cnt) {
+            qp->fwres.iocbs_used -= iores->iocb_cnt;
+        } else {
+            /* should not happen */
+            qp->fwres.iocbs_used = 0;
+        }
+        break;
+    }
+    iores->res_type = RESOURCE_NONE;
+}
@@ -594,6 +594,7 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
     uint32_t dsd_list_len;
     struct dsd_dma *dsd_ptr;
     struct ct6_dsd *ctx;
+    struct qla_qpair *qpair = sp->qpair;

     cmd = GET_CMD_SP(sp);

@@ -612,12 +613,12 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
     /* Set transfer direction */
     if (cmd->sc_data_direction == DMA_TO_DEVICE) {
         cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
-        vha->qla_stats.output_bytes += scsi_bufflen(cmd);
-        vha->qla_stats.output_requests++;
+        qpair->counters.output_bytes += scsi_bufflen(cmd);
+        qpair->counters.output_requests++;
     } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
         cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
-        vha->qla_stats.input_bytes += scsi_bufflen(cmd);
-        vha->qla_stats.input_requests++;
+        qpair->counters.input_bytes += scsi_bufflen(cmd);
+        qpair->counters.input_requests++;
     }

     cur_seg = scsi_sglist(cmd);

@@ -704,6 +705,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
     struct scsi_cmnd *cmd;
     struct scatterlist *sg;
     int i;
+    struct qla_qpair *qpair = sp->qpair;

     cmd = GET_CMD_SP(sp);

@@ -721,12 +723,12 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
     /* Set transfer direction */
     if (cmd->sc_data_direction == DMA_TO_DEVICE) {
         cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_WRITE_DATA);
-        vha->qla_stats.output_bytes += scsi_bufflen(cmd);
-        vha->qla_stats.output_requests++;
+        qpair->counters.output_bytes += scsi_bufflen(cmd);
+        qpair->counters.output_requests++;
     } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
         cmd_pkt->task_mgmt_flags = cpu_to_le16(TMF_READ_DATA);
-        vha->qla_stats.input_bytes += scsi_bufflen(cmd);
-        vha->qla_stats.input_requests++;
+        qpair->counters.input_bytes += scsi_bufflen(cmd);
+        qpair->counters.input_requests++;
     }

     /* One DSD is available in the Command Type 3 IOCB */

@@ -1635,6 +1637,12 @@ qla24xx_start_scsi(srb_t *sp)

     tot_dsds = nseg;
     req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

+    sp->iores.res_type = RESOURCE_INI;
+    sp->iores.iocb_cnt = req_cnt;
+    if (qla_get_iocbs(sp->qpair, &sp->iores))
+        goto queuing_error;
+
     if (req->cnt < (req_cnt + 2)) {
         cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
             rd_reg_dword_relaxed(req->req_q_out);

@@ -1707,6 +1715,7 @@ qla24xx_start_scsi(srb_t *sp)
     if (tot_dsds)
         scsi_dma_unmap(cmd);

+    qla_put_iocbs(sp->qpair, &sp->iores);
     spin_unlock_irqrestore(&ha->hardware_lock, flags);

     return QLA_FUNCTION_FAILED;

@@ -1820,6 +1829,12 @@ qla24xx_dif_start_scsi(srb_t *sp)
     /* Total Data and protection sg segment(s) */
     tot_prot_dsds = nseg;
     tot_dsds += nseg;

+    sp->iores.res_type = RESOURCE_INI;
+    sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+    if (qla_get_iocbs(sp->qpair, &sp->iores))
+        goto queuing_error;
+
     if (req->cnt < (req_cnt + 2)) {
         cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
             rd_reg_dword_relaxed(req->req_q_out);

@@ -1894,6 +1909,7 @@ qla24xx_dif_start_scsi(srb_t *sp)
     }
     /* Cleanup will be performed by the caller (queuecommand) */

+    qla_put_iocbs(sp->qpair, &sp->iores);
     spin_unlock_irqrestore(&ha->hardware_lock, flags);
     return QLA_FUNCTION_FAILED;
 }

@@ -1955,6 +1971,12 @@ qla2xxx_start_scsi_mq(srb_t *sp)

     tot_dsds = nseg;
     req_cnt = qla24xx_calc_iocbs(vha, tot_dsds);

+    sp->iores.res_type = RESOURCE_INI;
+    sp->iores.iocb_cnt = req_cnt;
+    if (qla_get_iocbs(sp->qpair, &sp->iores))
+        goto queuing_error;
+
     if (req->cnt < (req_cnt + 2)) {
         cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
             rd_reg_dword_relaxed(req->req_q_out);

@@ -2027,6 +2049,7 @@ qla2xxx_start_scsi_mq(srb_t *sp)
     if (tot_dsds)
         scsi_dma_unmap(cmd);

+    qla_put_iocbs(sp->qpair, &sp->iores);
     spin_unlock_irqrestore(&qpair->qp_lock, flags);

     return QLA_FUNCTION_FAILED;

@@ -2155,6 +2178,12 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
     /* Total Data and protection sg segment(s) */
     tot_prot_dsds = nseg;
     tot_dsds += nseg;

+    sp->iores.res_type = RESOURCE_INI;
+    sp->iores.iocb_cnt = qla24xx_calc_iocbs(vha, tot_dsds);
+    if (qla_get_iocbs(sp->qpair, &sp->iores))
+        goto queuing_error;
+
     if (req->cnt < (req_cnt + 2)) {
         cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr :
             rd_reg_dword_relaxed(req->req_q_out);

@@ -2232,6 +2261,7 @@ qla2xxx_dif_start_scsi_mq(srb_t *sp)
     }
     /* Cleanup will be performed by the caller (queuecommand) */

+    qla_put_iocbs(sp->qpair, &sp->iores);
     spin_unlock_irqrestore(&qpair->qp_lock, flags);
     return QLA_FUNCTION_FAILED;
 }

@@ -2348,6 +2378,14 @@ qla24xx_prli_iocb(srb_t *sp, struct logio_entry_24xx *logio)
         if (sp->vha->flags.nvme_first_burst)
             logio->io_parameter[0] =
                 cpu_to_le32(NVME_PRLI_SP_FIRST_BURST);
+        if (sp->vha->flags.nvme2_enabled) {
+            /* Set service parameter BIT_8 for SLER support */
+            logio->io_parameter[0] |=
+                cpu_to_le32(NVME_PRLI_SP_SLER);
+            /* Set service parameter BIT_9 for PI control support */
+            logio->io_parameter[0] |=
+                cpu_to_le32(NVME_PRLI_SP_PI_CTRL);
+        }
     }

     logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);

@@ -2975,8 +3013,7 @@ qla24xx_els_dcmd2_iocb(scsi_qla_host_t *vha, int els_opcode,
     memset(ptr, 0, sizeof(struct els_plogi_payload));
     memset(resp_ptr, 0, sizeof(struct els_plogi_payload));
     memcpy(elsio->u.els_plogi.els_plogi_pyld->data,
-        &ha->plogi_els_payld.data,
-        sizeof(elsio->u.els_plogi.els_plogi_pyld->data));
+        &ha->plogi_els_payld.fl_csp, LOGIN_TEMPLATE_SIZE);

     elsio->u.els_plogi.els_cmd = els_opcode;
     elsio->u.els_plogi.els_plogi_pyld->opcode = els_opcode;
@@ -767,7 +767,7 @@ qla27xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
         ql_log(ql_log_warn, vha, 0x02f0,
             "MPI Heartbeat stop. MPI reset is%s needed. "
             "MB0[%xh] MB1[%xh] MB2[%xh] MB3[%xh]\n",
-            mb[0] & BIT_8 ? "" : " not",
+            mb[1] & BIT_8 ? "" : " not",
             mb[0], mb[1], mb[2], mb[3]);

         if ((mb[1] & BIT_8) == 0)

@@ -1716,35 +1716,35 @@ qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
 {
     struct qla_hw_data *ha = vha->hw;
     sts_entry_t *pkt = iocb;
-    srb_t *sp = NULL;
+    srb_t *sp;
     uint16_t index;

     index = LSW(pkt->handle);
     if (index >= req->num_outstanding_cmds) {
         ql_log(ql_log_warn, vha, 0x5031,
-            "Invalid command index (%x) type %8ph.\n",
-            index, iocb);
+            "%s: Invalid command index (%x) type %8ph.\n",
+            func, index, iocb);
         if (IS_P3P_TYPE(ha))
             set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
         else
             set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-        goto done;
+        return NULL;
     }
     sp = req->outstanding_cmds[index];
     if (!sp) {
         ql_log(ql_log_warn, vha, 0x5032,
-            "Invalid completion handle (%x) -- timed-out.\n", index);
-        return sp;
+            "%s: Invalid completion handle (%x) -- timed-out.\n",
+            func, index);
+        return NULL;
     }
     if (sp->handle != index) {
         ql_log(ql_log_warn, vha, 0x5033,
-            "SRB handle (%x) mismatch %x.\n", sp->handle, index);
+            "%s: SRB handle (%x) mismatch %x.\n", func,
+            sp->handle, index);
         return NULL;
     }

     req->outstanding_cmds[index] = NULL;

-done:
     return sp;
 }

@@ -2855,7 +2855,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
     int logit = 1;
     int res = 0;
     uint16_t state_flags = 0;
-    uint16_t retry_delay = 0;
+    uint16_t sts_qual = 0;

     if (IS_FWI2_CAPABLE(ha)) {
         comp_status = le16_to_cpu(sts24->comp_status);

@@ -2901,6 +2901,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
         }
         return;
     }
+    qla_put_iocbs(sp->qpair, &sp->iores);

     if (sp->cmd_type != TYPE_SRB) {
         req->outstanding_cmds[handle] = NULL;

@@ -2953,8 +2954,6 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
     sense_len = par_sense_len = rsp_info_len = resid_len =
         fw_resid_len = 0;
     if (IS_FWI2_CAPABLE(ha)) {
-        u16 sts24_retry_delay = le16_to_cpu(sts24->retry_delay);
-
         if (scsi_status & SS_SENSE_LEN_VALID)
             sense_len = le32_to_cpu(sts24->sense_len);
         if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)

@@ -2968,13 +2967,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
         host_to_fcp_swap(sts24->data, sizeof(sts24->data));
         ox_id = le16_to_cpu(sts24->ox_id);
         par_sense_len = sizeof(sts24->data);
-        /* Valid values of the retry delay timer are 0x1-0xffef */
-        if (sts24_retry_delay > 0 && sts24_retry_delay < 0xfff1) {
-            retry_delay = sts24_retry_delay & 0x3fff;
-            ql_dbg(ql_dbg_io, sp->vha, 0x3033,
-                "%s: scope=%#x retry_delay=%#x\n", __func__,
-                sts24_retry_delay >> 14, retry_delay);
-        }
+        sts_qual = le16_to_cpu(sts24->status_qualifier);
     } else {
         if (scsi_status & SS_SENSE_LEN_VALID)
             sense_len = le16_to_cpu(sts->req_sense_length);

@@ -3012,9 +3005,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
      * Check retry_delay_timer value if we receive a busy or
      * queue full.
      */
-    if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
-        lscsi_status == SAM_STAT_BUSY)
-        qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+    if (unlikely(lscsi_status == SAM_STAT_TASK_SET_FULL ||
+             lscsi_status == SAM_STAT_BUSY))
+        qla2x00_set_retry_delay_timestamp(fcport, sts_qual);

     /*
      * Based on Host and scsi status generate status code for Linux

@@ -3321,6 +3314,7 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
     default:
         sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
         if (sp) {
+            qla_put_iocbs(sp->qpair, &sp->iores);
             sp->done(sp, res);
             return 0;
         }

@@ -3406,6 +3400,32 @@ void qla24xx_nvme_ls4_iocb(struct scsi_qla_host *vha,
     sp->done(sp, comp_status);
 }

+static void qla24xx_process_mbx_iocb_response(struct scsi_qla_host *vha,
+    struct rsp_que *rsp, struct sts_entry_24xx *pkt)
+{
+    struct qla_hw_data *ha = vha->hw;
+    srb_t *sp;
+    static const char func[] = "MBX-IOCB2";
+
+    sp = qla2x00_get_sp_from_handle(vha, func, rsp->req, pkt);
+    if (!sp)
+        return;
+
+    if (sp->type == SRB_SCSI_CMD ||
+        sp->type == SRB_NVME_CMD ||
+        sp->type == SRB_TM_CMD) {
+        ql_log(ql_log_warn, vha, 0x509d,
+            "Inconsistent event entry type %d\n", sp->type);
+        if (IS_P3P_TYPE(ha))
+            set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+        else
+            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+        return;
+    }
+
+    qla24xx_mbx_iocb_entry(vha, rsp->req, (struct mbx_24xx_entry *)pkt);
+}
+
 /**
  * qla24xx_process_response_queue() - Process response queue entries.
  * @vha: SCSI driver HA context

@@ -3422,8 +3442,10 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
     if (!ha->flags.fw_started)
         return;

-    if (rsp->qpair->cpuid != smp_processor_id())
+    if (rsp->qpair->cpuid != smp_processor_id() || !rsp->qpair->rcv_intr) {
+        rsp->qpair->rcv_intr = 1;
         qla_cpu_update(rsp->qpair, smp_processor_id());
+    }

     while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
         pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

@@ -3513,8 +3535,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
                 (struct abort_entry_24xx *)pkt);
             break;
         case MBX_IOCB_TYPE:
-            qla24xx_mbx_iocb_entry(vha, rsp->req,
-                (struct mbx_24xx_entry *)pkt);
+            qla24xx_process_mbx_iocb_response(vha, rsp, pkt);
             break;
         case VP_CTRL_IOCB_TYPE:
             qla_ctrlvp_completed(vha, rsp->req,

@@ -3873,7 +3894,7 @@ qla2xxx_msix_rsp_q(int irq, void *dev_id)
     }
     ha = qpair->hw;

-    queue_work(ha->wq, &qpair->q_work);
+    queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);

     return IRQ_HANDLED;
 }

@@ -3899,7 +3920,7 @@ qla2xxx_msix_rsp_q_hs(int irq, void *dev_id)
     wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
     spin_unlock_irqrestore(&ha->hardware_lock, flags);

-    queue_work(ha->wq, &qpair->q_work);
+    queue_work_on(smp_processor_id(), ha->wq, &qpair->q_work);

     return IRQ_HANDLED;
 }
@@ -845,7 +845,7 @@ qla_get_exlogin_status(scsi_qla_host_t *vha, uint16_t *buf_sz,
  * Context:
  *    Kernel context.
  */
-#define    CONFIG_XLOGINS_MEM    0x3
+#define    CONFIG_XLOGINS_MEM    0x9
 int
 qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
 {

@@ -872,8 +872,9 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
     mcp->flags = 0;
     rval = qla2x00_mailbox_command(vha, mcp);
     if (rval != QLA_SUCCESS) {
-        /*EMPTY*/
-        ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
+        ql_dbg(ql_dbg_mbx, vha, 0x111b,
+            "EXlogin Failed=%x. MB0=%x MB11=%x\n",
+            rval, mcp->mb[0], mcp->mb[11]);
     } else {
         ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
             "Done %s.\n", __func__);

@@ -1092,6 +1093,14 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
                 "%s: FC-NVMe is Enabled (0x%x)\n",
                 __func__, ha->fw_attributes_h);
         }
+
+        /* BIT_13 of Extended FW Attributes informs about NVMe2 support */
+        if (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_NVME2) {
+            ql_log(ql_log_info, vha, 0xd302,
+                "Firmware supports NVMe2 0x%x\n",
+                ha->fw_attributes_ext[0]);
+            vha->flags.nvme2_enabled = 1;
+        }
     }

     if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {

@@ -1121,12 +1130,18 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
         if (ha->flags.scm_supported_a &&
             (ha->fw_attributes_ext[0] & FW_ATTR_EXT0_SCM_SUPPORTED)) {
             ha->flags.scm_supported_f = 1;
+            memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
             ha->sf_init_cb->flags |= BIT_13;
         }
         ql_log(ql_log_info, vha, 0x11a3, "SCM in FW: %s\n",
             (ha->flags.scm_supported_f) ? "Supported" :
             "Not Supported");
+
+        if (vha->flags.nvme2_enabled) {
+            /* set BIT_15 of special feature control block for SLER */
+            ha->sf_init_cb->flags |= BIT_15;
+            /* set BIT_14 of special feature control block for PI CTRL*/
+            ha->sf_init_cb->flags |= BIT_14;
+        }
     }

 failed:

@@ -1822,7 +1837,7 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
         mcp->out_mb |= MBX_14|MBX_13|MBX_12|MBX_11|MBX_10;
     }

-    if (ha->flags.scm_supported_f) {
+    if (ha->flags.scm_supported_f || vha->flags.nvme2_enabled) {
         mcp->mb[1] |= BIT_1;
         mcp->mb[16] = MSW(ha->sf_init_cb_dma);
         mcp->mb[17] = LSW(ha->sf_init_cb_dma);

@@ -3979,7 +3994,8 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,

     if (fcport) {
         fcport->plogi_nack_done_deadline = jiffies + HZ;
-        fcport->dm_login_expire = jiffies + 2*HZ;
+        fcport->dm_login_expire = jiffies +
+            QLA_N2N_WAIT_TIME * HZ;
         fcport->scan_state = QLA_FCPORT_FOUND;
         fcport->n2n_flag = 1;
         fcport->keep_nport_handle = 1;

@@ -4925,8 +4941,6 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
         return QLA_MEMORY_ALLOC_FAILED;
     }

-    memset(els_cmd_map, 0, ELS_CMD_MAP_SIZE);
-
     /* List of Purex ELS */
     cmd_opcode[0] = ELS_FPIN;
     cmd_opcode[1] = ELS_RDP;

@@ -4958,51 +4972,12 @@ qla25xx_set_els_cmds_supported(scsi_qla_host_t *vha)
             "Done %s.\n", __func__);
     }

-    dma_free_coherent(&ha->pdev->dev, DMA_POOL_SIZE,
+    dma_free_coherent(&ha->pdev->dev, ELS_CMD_MAP_SIZE,
         els_cmd_map, els_cmd_map_dma);

     return rval;
 }

-int
-qla24xx_get_buffer_credits(scsi_qla_host_t *vha, struct buffer_credit_24xx *bbc,
-    dma_addr_t bbc_dma)
-{
-    mbx_cmd_t mc;
-    mbx_cmd_t *mcp = &mc;
-    int rval;
-
-    if (!IS_FWI2_CAPABLE(vha->hw))
-        return QLA_FUNCTION_FAILED;
-
-    ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118e,
-        "Entered %s.\n", __func__);
-
-    mcp->mb[0] = MBC_GET_RNID_PARAMS;
-    mcp->mb[1] = RNID_BUFFER_CREDITS << 8;
-    mcp->mb[2] = MSW(LSD(bbc_dma));
-    mcp->mb[3] = LSW(LSD(bbc_dma));
-    mcp->mb[6] = MSW(MSD(bbc_dma));
-    mcp->mb[7] = LSW(MSD(bbc_dma));
-    mcp->mb[8] = sizeof(*bbc) / sizeof(*bbc->parameter);
-    mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
-    mcp->in_mb = MBX_1|MBX_0;
-    mcp->buf_size = sizeof(*bbc);
-    mcp->flags = MBX_DMA_IN;
-    mcp->tov = MBX_TOV_SECONDS;
-    rval = qla2x00_mailbox_command(vha, mcp);
-
-    if (rval != QLA_SUCCESS) {
-        ql_dbg(ql_dbg_mbx, vha, 0x118f,
-            "Failed=%x mb[0]=%x,%x.\n", rval, mcp->mb[0], mcp->mb[1]);
-    } else {
-        ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1190,
-            "Done %s.\n", __func__);
-    }
-
-    return rval;
-}
-
 static int
 qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
 {
@@ -808,11 +808,9 @@ static void qla_do_work(struct work_struct *work)
 {
     unsigned long flags;
     struct qla_qpair *qpair = container_of(work, struct qla_qpair, q_work);
-    struct scsi_qla_host *vha;
-    struct qla_hw_data *ha = qpair->hw;
+    struct scsi_qla_host *vha = qpair->vha;

     spin_lock_irqsave(&qpair->qp_lock, flags);
-    vha = pci_get_drvdata(ha->pdev);
     qla24xx_process_response_queue(vha, qpair->rsp);
     spin_unlock_irqrestore(&qpair->qp_lock, flags);
@@ -42,7 +42,7 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
     req.port_name = wwn_to_u64(fcport->port_name);
     req.node_name = wwn_to_u64(fcport->node_name);
     req.port_role = 0;
-    req.dev_loss_tmo = NVME_FC_DEV_LOSS_TMO;
+    req.dev_loss_tmo = 0;

     if (fcport->nvme_prli_service_param & NVME_PRLI_SP_INITIATOR)
         req.port_role = FC_PORT_ROLE_NVME_INITIATOR;

@@ -69,6 +69,14 @@ int qla_nvme_register_remote(struct scsi_qla_host *vha, struct fc_port *fcport)
         return ret;
     }

+    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_SLER)
+        ql_log(ql_log_info, vha, 0x212a,
+            "PortID:%06x Supports SLER\n", req.port_id);
+
+    if (fcport->nvme_prli_service_param & NVME_PRLI_SP_PI_CTRL)
+        ql_log(ql_log_info, vha, 0x212b,
+            "PortID:%06x Supports PI control\n", req.port_id);
+
     rport = fcport->nvme_remote_port->private;
     rport->fcport = fcport;

@@ -368,6 +376,7 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
     struct srb_iocb *nvme = &sp->u.iocb_cmd;
     struct scatterlist *sgl, *sg;
     struct nvmefc_fcp_req *fd = nvme->u.nvme.desc;
+    struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
     uint32_t        rval = QLA_SUCCESS;

     /* Setup qpair pointers */

@@ -399,8 +408,6 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
     }

     if (unlikely(!fd->sqid)) {
-        struct nvme_fc_cmd_iu *cmd = fd->cmdaddr;
-
         if (cmd->sqe.common.opcode == nvme_admin_async_event) {
             nvme->u.nvme.aen_op = 1;
             atomic_inc(&ha->nvme_active_aen_cnt);

@@ -428,8 +435,8 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
     /* No data transfer how do we check buffer len == 0?? */
     if (fd->io_dir == NVMEFC_FCP_READ) {
         cmd_pkt->control_flags = cpu_to_le16(CF_READ_DATA);
-        vha->qla_stats.input_bytes += fd->payload_length;
-        vha->qla_stats.input_requests++;
+        qpair->counters.input_bytes += fd->payload_length;
+        qpair->counters.input_requests++;
     } else if (fd->io_dir == NVMEFC_FCP_WRITE) {
         cmd_pkt->control_flags = cpu_to_le16(CF_WRITE_DATA);
         if ((vha->flags.nvme_first_burst) &&

@@ -441,11 +448,16 @@ static inline int qla2x00_start_nvme_mq(srb_t *sp)
             cmd_pkt->control_flags |=
                 cpu_to_le16(CF_NVME_FIRST_BURST_ENABLE);
         }
-        vha->qla_stats.output_bytes += fd->payload_length;
-        vha->qla_stats.output_requests++;
+        qpair->counters.output_bytes += fd->payload_length;
+        qpair->counters.output_requests++;
     } else if (fd->io_dir == 0) {
         cmd_pkt->control_flags = 0;
     }
+    /* Set BIT_13 of control flags for Async event */
+    if (vha->flags.nvme2_enabled &&
+        cmd->sqe.common.opcode == nvme_admin_async_event) {
+        cmd_pkt->control_flags |= cpu_to_le16(CF_ADMIN_ASYNC_EVENT);
+    }

     /* Set NPORT-ID */
     cmd_pkt->nport_handle = cpu_to_le16(sp->fcport->loop_id);

@@ -548,6 +560,14 @@ static int qla_nvme_post_cmd(struct nvme_fc_local_port *lport,
         return rval;

     vha = fcport->vha;
+
+    if (!(fcport->nvme_flag & NVME_FLAG_REGISTERED))
+        return rval;
+
+    if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
+        (qpair && !qpair->fw_started) || fcport->deleted)
+        return -EBUSY;
+
     /*
      * If we know the dev is going away while the transport is still sending
      * IO's return busy back to stall the IO Q.  This happens when the

@@ -683,7 +703,7 @@ int qla_nvme_register_hba(struct scsi_qla_host *vha)
     struct nvme_fc_port_template *tmpl;
     struct qla_hw_data *ha;
     struct nvme_fc_port_info pinfo;
-    int ret = EINVAL;
+    int ret = -EINVAL;

     if (!IS_ENABLED(CONFIG_NVME_FC))
         return ret;
@@ -14,9 +14,6 @@
 #include "qla_def.h"
 #include "qla_dsd.h"

-/* default dev loss time (seconds) before transport tears down ctrl */
-#define NVME_FC_DEV_LOSS_TMO  30
-
 #define NVME_ATIO_CMD_OFF 32
 #define NVME_FIRST_PACKET_CMDLEN (64 - NVME_ATIO_CMD_OFF)
 #define Q2T_NVME_NUM_TAGS 2048

@@ -57,6 +54,7 @@ struct cmd_nvme {
     uint64_t rsvd;

     __le16    control_flags;        /* Control Flags */
+#define CF_ADMIN_ASYNC_EVENT        BIT_13
 #define CF_NVME_FIRST_BURST_ENABLE    BIT_11
 #define CF_DIF_SEG_DESCR_ENABLE        BIT_3
 #define CF_DATA_SEG_DESCR_ENABLE    BIT_2
@@ -40,6 +40,11 @@ module_param(ql2xfulldump_on_mpifail, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql2xfulldump_on_mpifail,
         "Set this to take full dump on MPI hang.");

+int ql2xenforce_iocb_limit = 1;
+module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ql2xenforce_iocb_limit,
+        "Enforce IOCB throttling, to avoid FW congestion. (default: 0)");
+
 /*
  * CT6 CTX allocation cache
  */

@@ -1885,7 +1890,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
     if (!dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
         /* Any upper-dword bits set? */
         if (MSD(dma_get_required_mask(&ha->pdev->dev)) &&
-            !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
+            !dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(64))) {
             /* Ok, a 64bit DMA mask is applicable. */
             ha->flags.enable_64bit_addressing = 1;
             ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;

@@ -1895,7 +1900,7 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
     }

     dma_set_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
-    pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(32));
+    dma_set_coherent_mask(&ha->pdev->dev, DMA_BIT_MASK(32));
 }

 static void

@@ -3316,6 +3321,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
         for (i = 0; i < ha->max_qpairs; i++)
             qla2xxx_create_qpair(base_vha, 5, 0, startit);
     }
+    qla_init_iocb_limit(base_vha);

     if (ha->flags.running_gold_fw)
         goto skip_dpc;

@@ -4225,6 +4231,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
             &ha->sf_init_cb_dma);
         if (!ha->sf_init_cb)
             goto fail_sf_init_cb;
+        memset(ha->sf_init_cb, 0, sizeof(struct init_sf_cb));
         ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0199,
             "sf_init_cb=%p.\n", ha->sf_init_cb);
     }

@@ -4379,11 +4386,12 @@ int
 qla2x00_set_exlogins_buffer(scsi_qla_host_t *vha)
 {
     int rval;
-    uint16_t size, max_cnt, temp;
+    uint16_t size, max_cnt;
+    uint32_t temp;
     struct qla_hw_data *ha = vha->hw;

     /* Return if we don't need to alloacate any extended logins */
-    if (!ql2xexlogins)
+    if (ql2xexlogins <= MAX_FIBRE_DEVICES_2400)
         return QLA_SUCCESS;

     if (!IS_EXLOGIN_OFFLD_CAPABLE(ha))

@@ -4872,7 +4880,7 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
     }
     INIT_DELAYED_WORK(&vha->scan.scan_work, qla_scan_work_fn);

-    sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
+    sprintf(vha->host_str, "%s_%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
     ql_dbg(ql_dbg_init, vha, 0x0041,
         "Allocated the host=%p hw=%p vha=%p dev_name=%s",
         vha->host, vha->hw, vha,

@@ -5001,7 +5009,7 @@ qla2x00_uevent_emit(struct scsi_qla_host *vha, u32 code)

     switch (code) {
     case QLA_UEVENT_CODE_FW_DUMP:
-        snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
+        snprintf(event_string, sizeof(event_string), "FW_DUMP=%lu",
             vha->host_no);
         break;
     default:

@@ -5089,6 +5097,8 @@ void qla24xx_create_new_sess(struct scsi_qla_host *vha, struct qla_work_evt *e)

     fcport->fc4_type = e->u.new_sess.fc4_type;
     if (e->u.new_sess.fc4_type & FS_FCP_IS_N2N) {
+        fcport->dm_login_expire = jiffies +
+            QLA_N2N_WAIT_TIME * HZ;
         fcport->fc4_type = FS_FC4TYPE_FCP;
         fcport->n2n_flag = 1;
         if (vha->flags.nvme_enabled)

@@ -5810,98 +5820,6 @@ qla25xx_rdp_rsp_reduce_size(struct scsi_qla_host *vha,
     return true;
 }

-static uint
-qla25xx_rdp_port_speed_capability(struct qla_hw_data *ha)
-{
-    if (IS_CNA_CAPABLE(ha))
-        return RDP_PORT_SPEED_10GB;
-
-    if (IS_QLA27XX(ha) || IS_QLA28XX(ha)) {
-        unsigned int speeds = 0;
-
-        if (ha->max_supported_speed == 2) {
-            if (ha->min_supported_speed <= 6)
-                speeds |= RDP_PORT_SPEED_64GB;
-        }
-
-        if (ha->max_supported_speed == 2 ||
-            ha->max_supported_speed == 1) {
-            if (ha->min_supported_speed <= 5)
-                speeds |= RDP_PORT_SPEED_32GB;
-        }
-
-        if (ha->max_supported_speed == 2 ||
-            ha->max_supported_speed == 1 ||
-            ha->max_supported_speed == 0) {
-            if (ha->min_supported_speed <= 4)
-                speeds |= RDP_PORT_SPEED_16GB;
-        }
-
-        if (ha->max_supported_speed == 1 ||
-            ha->max_supported_speed == 0) {
-            if (ha->min_supported_speed <= 3)
-                speeds |= RDP_PORT_SPEED_8GB;
-        }
-
-        if (ha->max_supported_speed == 0) {
-            if (ha->min_supported_speed <= 2)
-                speeds |= RDP_PORT_SPEED_4GB;
-        }
-
-        return speeds;
-    }
-
-    if (IS_QLA2031(ha))
-        return RDP_PORT_SPEED_16GB|RDP_PORT_SPEED_8GB|
-            RDP_PORT_SPEED_4GB;
-
-    if (IS_QLA25XX(ha))
-        return RDP_PORT_SPEED_8GB|RDP_PORT_SPEED_4GB|
-            RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
-
-    if (IS_QLA24XX_TYPE(ha))
-        return RDP_PORT_SPEED_4GB|RDP_PORT_SPEED_2GB|
-            RDP_PORT_SPEED_1GB;
-
-    if (IS_QLA23XX(ha))
-        return RDP_PORT_SPEED_2GB|RDP_PORT_SPEED_1GB;
-
-    return RDP_PORT_SPEED_1GB;
-}
-
-static uint
-qla25xx_rdp_port_speed_currently(struct qla_hw_data *ha)
-{
-    switch (ha->link_data_rate) {
-    case PORT_SPEED_1GB:
-        return RDP_PORT_SPEED_1GB;
-
-    case PORT_SPEED_2GB:
-        return RDP_PORT_SPEED_2GB;
-
-    case PORT_SPEED_4GB:
-        return RDP_PORT_SPEED_4GB;
-
-    case PORT_SPEED_8GB:
-        return RDP_PORT_SPEED_8GB;
-
-    case PORT_SPEED_10GB:
-        return RDP_PORT_SPEED_10GB;
-
-    case PORT_SPEED_16GB:
-        return RDP_PORT_SPEED_16GB;
-
-    case PORT_SPEED_32GB:
-        return RDP_PORT_SPEED_32GB;
-
-    case PORT_SPEED_64GB:
-        return RDP_PORT_SPEED_64GB;
-
-    default:
-        return RDP_PORT_SPEED_UNKNOWN;
-    }
-}
-
 /*
  * Function Name: qla24xx_process_purex_iocb
  *

@@ -5921,12 +5839,10 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
     dma_addr_t rsp_els_dma;
     dma_addr_t rsp_payload_dma;
     dma_addr_t stat_dma;
-    dma_addr_t bbc_dma;
     dma_addr_t sfp_dma;
     struct els_entry_24xx *rsp_els = NULL;
     struct rdp_rsp_payload *rsp_payload = NULL;
     struct link_statistics *stat = NULL;
-    struct buffer_credit_24xx *bbc = NULL;
     uint8_t *sfp = NULL;
     uint16_t sfp_flags = 0;
     uint rsp_payload_length = sizeof(*rsp_payload);

@@ -5970,9 +5886,6 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
     stat = dma_alloc_coherent(&ha->pdev->dev, sizeof(*stat),
         &stat_dma, GFP_KERNEL);

-    bbc = dma_alloc_coherent(&ha->pdev->dev, sizeof(*bbc),
-        &bbc_dma, GFP_KERNEL);
-
     /* Prepare Response IOCB */
     rsp_els->entry_type = ELS_IOCB_TYPE;
     rsp_els->entry_count = 1;

@@ -6068,9 +5981,9 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
     rsp_payload->port_speed_desc.desc_len =
         cpu_to_be32(RDP_DESC_LEN(rsp_payload->port_speed_desc));
     rsp_payload->port_speed_desc.speed_capab = cpu_to_be16(
-        qla25xx_rdp_port_speed_capability(ha));
+        qla25xx_fdmi_port_speed_capability(ha));
     rsp_payload->port_speed_desc.operating_speed = cpu_to_be16(
-        qla25xx_rdp_port_speed_currently(ha));
+        qla25xx_fdmi_port_speed_currently(ha));

     /* Link Error Status Descriptor */
     rsp_payload->ls_err_desc.desc_tag = cpu_to_be32(0x10002);

@@ -6126,13 +6039,10 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
     rsp_payload->buffer_credit_desc.attached_fcport_b2b = cpu_to_be32(0);
     rsp_payload->buffer_credit_desc.fcport_rtt = cpu_to_be32(0);

-    if (bbc) {
-        memset(bbc, 0, sizeof(*bbc));
-        rval = qla24xx_get_buffer_credits(vha, bbc, bbc_dma);
-        if (!rval) {
-            rsp_payload->buffer_credit_desc.fcport_b2b =
-                cpu_to_be32(LSW(bbc->parameter[0]));
-        }
+    if (ha->flags.plogi_template_valid) {
+        uint32_t tmp =
+            be16_to_cpu(ha->plogi_els_payld.fl_csp.sp_bb_cred);
+        rsp_payload->buffer_credit_desc.fcport_b2b = cpu_to_be32(tmp);
     }

     if (rsp_payload_length < sizeof(*rsp_payload))

@@ -6310,9 +6220,6 @@ void qla24xx_process_purex_rdp(struct scsi_qla_host *vha,
     }

 dealloc:
-    if (bbc)
-        dma_free_coherent(&ha->pdev->dev, sizeof(*bbc),
-            bbc, bbc_dma);
     if (stat)
         dma_free_coherent(&ha->pdev->dev, sizeof(*stat),
             stat, stat_dma);

@@ -7289,8 +7196,10 @@ qla2x00_timer(struct timer_list *t)
      * FC-NVME
      * see if the active AEN count has changed from what was last reported.
      */
+    index = atomic_read(&ha->nvme_active_aen_cnt);
     if (!vha->vp_idx &&
-        (atomic_read(&ha->nvme_active_aen_cnt) != ha->nvme_last_rptd_aen) &&
+        (index != ha->nvme_last_rptd_aen) &&
+        (index >= DEFAULT_ZIO_THRESHOLD) &&
         ha->zio_mode == QLA_ZIO_MODE_6 &&
         !ha->flags.host_shutting_down) {
         ql_log(ql_log_info, vha, 0x3002,

@@ -7302,9 +7211,8 @@ qla2x00_timer(struct timer_list *t)
     }

     if (!vha->vp_idx &&
-        (atomic_read(&ha->zio_threshold) != ha->last_zio_threshold) &&
-        (ha->zio_mode == QLA_ZIO_MODE_6) &&
-        (IS_QLA83XX(ha) || IS_QLA27XX(ha) || IS_QLA28XX(ha))) {
+        atomic_read(&ha->zio_threshold) != ha->last_zio_threshold &&
+        IS_ZIO_THRESHOLD_CAPABLE(ha)) {
         ql_log(ql_log_info, vha, 0x3002,
             "Sched: Set ZIO exchange threshold to %d.\n",
             ha->last_zio_threshold);

@@ -8044,7 +7952,6 @@ module_exit(qla2x00_module_exit);
 MODULE_AUTHOR("QLogic Corporation");
 MODULE_DESCRIPTION("QLogic Fibre Channel HBA Driver");
 MODULE_LICENSE("GPL");
-MODULE_VERSION(QLA2XXX_VERSION);
 MODULE_FIRMWARE(FW_FILE_ISP21XX);
 MODULE_FIRMWARE(FW_FILE_ISP22XX);
 MODULE_FIRMWARE(FW_FILE_ISP2300);
@@ -1111,6 +1111,8 @@ void qlt_free_session_done(struct work_struct *work)
     spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
     sess->free_pending = 0;

+    qla2x00_dfs_remove_rport(vha, sess);
+
     ql_dbg(ql_dbg_disc, vha, 0xf001,
         "Unregistration of sess %p %8phC finished fcp_cnt %d\n",
         sess, sess->port_name, vha->fcport_count);

@@ -1229,14 +1231,15 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
     case DSC_DELETE_PEND:
         return;
     case DSC_DELETED:
-        if (tgt && tgt->tgt_stop && (tgt->sess_count == 0))
-            wake_up_all(&tgt->waitQ);
-        if (sess->vha->fcport_count == 0)
-            wake_up_all(&sess->vha->fcport_waitQ);
-
         if (!sess->plogi_link[QLT_PLOGI_LINK_SAME_WWN] &&
-            !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT])
+            !sess->plogi_link[QLT_PLOGI_LINK_CONFLICT]) {
+            if (tgt && tgt->tgt_stop && tgt->sess_count == 0)
+                wake_up_all(&tgt->waitQ);
+
+            if (sess->vha->fcport_count == 0)
+                wake_up_all(&sess->vha->fcport_waitQ);
             return;
+        }
         break;
     case DSC_UPD_FCPORT:
         /*

@@ -2025,7 +2028,7 @@ static void qlt_do_tmr_work(struct work_struct *work)
     struct qla_tgt_mgmt_cmd *mcmd =
         container_of(work, struct qla_tgt_mgmt_cmd, work);
     struct qla_hw_data *ha = mcmd->vha->hw;
-    int rc = EIO;
+    int rc;
     uint32_t tag;
     unsigned long flags;

@@ -3781,7 +3784,7 @@ int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
         "multiple abort. %p transport_state %x, t_state %x, "
         "se_cmd_flags %x\n", cmd, cmd->se_cmd.transport_state,
         cmd->se_cmd.t_state, cmd->se_cmd.se_cmd_flags);
-        return EIO;
+        return -EIO;
     }
     cmd->aborted = 1;
     cmd->trc_flags |= TRC_ABORT;

@@ -5668,7 +5671,7 @@ static int qlt_chk_unresolv_exchg(struct scsi_qla_host *vha,
         /* found existing exchange */
         qpair->retry_term_cnt++;
         if (qpair->retry_term_cnt >= 5) {
-            rc = EIO;
+            rc = -EIO;
             qpair->retry_term_cnt = 0;
             ql_log(ql_log_warn, vha, 0xffff,
                 "Unable to send ABTS Respond. Dumping firmware.\n");