mirror of https://gitee.com/openkylin/linux.git
SCSI misc on 20130222
The patch set is mostly driver updates (bnx2fc, ipr, lpfc, qla4) and a few bug fixes.

Signed-off-by: James Bottomley <JBottomley@Parallels.com>

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v2.0.19 (GNU/Linux)

iQEcBAABAgAGBQJRJ0oJAAoJEDeqqVYsXL0MVVoH/A8xFiLkdCXaFkhYMlGRrDox
wgK/RwWekDOtVS2poPhDGKRfXaUn4uA3iOJig8HC5lD8NS65DElCpCWM+/DhitXt
Ky4ukgXSQ09IQtWraGqr//MC/YqM8iimWnGgXSouLPJ7a3AqYVIYCg9CEkGJX/mD
i09aE8uUyNd3Wp68anQ2w0RCH/7/InLL348WFmQ1eWxYyFJnLYGRkASbkuHxPjgU
H4QmINFlI4kBMWdHkVinh0w7cjcmUOAU+KyAZ75aelQ6dZ2aJioKn3BS7D6gF9jv
jJpJMIj8LzpAnfR3Z5ijkkcVG7E0ht+Dtr6kmAPZQJnkc/GdQvvgEg+F9aIpxoU=
=oDz4
-----END PGP SIGNATURE-----

Merge tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull first round of SCSI updates from James Bottomley:
 "The patch set is mostly driver updates (bnx2fc, ipr, lpfc, qla4) and a
  few bug fixes"

Pull delayed because google hates James, and sneakily considers his pull
requests spam. Why, google, why?

* tag 'scsi-misc' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (60 commits)
  [SCSI] aacraid: 1024 max outstanding command support for Series 7 and above
  [SCSI] bnx2fc: adjust duplicate test
  [SCSI] qla4xxx: Update driver version to 5.03.00-k4
  [SCSI] qla4xxx: Fix return code for qla4xxx_session_get_param.
  [SCSI] qla4xxx: wait for boot target login response during probe.
  [SCSI] qla4xxx: Added support for force firmware dump
  [SCSI] qla4xxx: Re-register IRQ handler while retrying initialize of adapter
  [SCSI] qla4xxx: Throttle active IOCBs to firmware limits
  [SCSI] qla4xxx: Remove unnecessary code from qla4xxx_init_local_data
  [SCSI] qla4xxx: Quiesce driver activities while loopback
  [SCSI] qla4xxx: Rename MBOX_ASTS_IDC_NOTIFY to MBOX_ASTS_IDC_REQUEST_NOTIFICATION
  [SCSI] qla4xxx: Add spurious interrupt messages under debug level 2
  [SCSI] cxgb4i: Remove the scsi host device when removing device
  [SCSI] bfa: fix strncpy() limiter in bfad_start_ops()
  [SCSI] qla4xxx: Update driver version to 5.03.00-k3
  [SCSI] qla4xxx: Correct the validation to check in get_sys_info mailbox
  [SCSI] qla4xxx: Pass correct function param to qla4_8xxx_rd_direct
  [SCSI] lpfc 8.3.37: Update lpfc version for 8.3.37 driver release
  [SCSI] lpfc 8.3.37: Fixed infinite loop in lpfc_sli4_fcf_rr_next_index_get.
  [SCSI] lpfc 8.3.37: Fixed crash due to SLI Port invalid resource count
  ...
commit 21f3b24da9
@@ -1800,7 +1800,8 @@ F: drivers/bcma/
F: include/linux/bcma/

BROCADE BFA FC SCSI DRIVER
M: Krishna C Gudipati <kgudipat@brocade.com>
M: Anil Gurumurthy <agurumur@brocade.com>
M: Vijaya Mohan Guvva <vmohan@brocade.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/bfa/

@@ -2074,8 +2075,8 @@ S: Maintained
F: include/linux/clk.h

CISCO FCOE HBA DRIVER
M: Abhijeet Joglekar <abjoglek@cisco.com>
M: Venkata Siva Vijayendra Bhamidipati <vbhamidi@cisco.com>
M: Hiral Patel <hiralpat@cisco.com>
M: Suma Ramars <sramars@cisco.com>
M: Brian Uchino <buchino@cisco.com>
L: linux-scsi@vger.kernel.org
S: Supported
@@ -12,13 +12,13 @@
*----------------------------------------------------------------------------*/

#ifndef AAC_DRIVER_BUILD
# define AAC_DRIVER_BUILD 29801
# define AAC_DRIVER_BUILD 30000
# define AAC_DRIVER_BRANCH "-ms"
#endif
#define MAXIMUM_NUM_CONTAINERS 32

#define AAC_NUM_MGT_FIB 8
#define AAC_NUM_IO_FIB (512 - AAC_NUM_MGT_FIB)
#define AAC_NUM_IO_FIB (1024 - AAC_NUM_MGT_FIB)
#define AAC_NUM_FIB (AAC_NUM_IO_FIB + AAC_NUM_MGT_FIB)

#define AAC_MAX_LUN (8)

@@ -36,6 +36,10 @@
#define CONTAINER_TO_ID(cont) (cont)
#define CONTAINER_TO_LUN(cont) (0)

#define PMC_DEVICE_S7 0x28c
#define PMC_DEVICE_S8 0x28d
#define PMC_DEVICE_S9 0x28f

#define aac_phys_to_logical(x) ((x)+1)
#define aac_logical_to_phys(x) ((x)?(x)-1:0)
@@ -404,7 +404,13 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
dev->max_fib_size = status[1] & 0xFFE0;
host->sg_tablesize = status[2] >> 16;
dev->sg_tablesize = status[2] & 0xFFFF;
host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
if (dev->pdev->device == PMC_DEVICE_S7 ||
    dev->pdev->device == PMC_DEVICE_S8 ||
    dev->pdev->device == PMC_DEVICE_S9)
	host->can_queue = ((status[3] >> 16) ? (status[3] >> 16) :
		(status[3] & 0xFFFF)) - AAC_NUM_MGT_FIB;
else
	host->can_queue = (status[3] & 0xFFFF) - AAC_NUM_MGT_FIB;
dev->max_num_aif = status[4] & 0xFFFF;
/*
 * NOTE:

@@ -452,6 +458,9 @@ struct aac_dev *aac_init_adapter(struct aac_dev *dev)
	}
}

if (host->can_queue > AAC_NUM_IO_FIB)
	host->can_queue = AAC_NUM_IO_FIB;

/*
 * Ok now init the communication subsystem
 */
@@ -1034,7 +1034,7 @@ bfad_start_ops(struct bfad_s *bfad) {
	sizeof(driver_info.host_os_patch) - 1);

strncpy(driver_info.os_device_name, bfad->pci_name,
	sizeof(driver_info.os_device_name - 1));
	sizeof(driver_info.os_device_name) - 1);

/* FCS driver info init */
spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -64,7 +64,7 @@
#include "bnx2fc_constants.h"

#define BNX2FC_NAME "bnx2fc"
#define BNX2FC_VERSION "1.0.12"
#define BNX2FC_VERSION "1.0.13"

#define PFX "bnx2fc: "

@@ -156,6 +156,18 @@
#define BNX2FC_RELOGIN_WAIT_TIME 200
#define BNX2FC_RELOGIN_WAIT_CNT 10

#define BNX2FC_STATS(hba, stat, cnt) \
do { \
	u32 val; \
\
	val = fw_stats->stat.cnt; \
	if (hba->prev_stats.stat.cnt <= val) \
		val -= hba->prev_stats.stat.cnt; \
	else \
		val += (0xfffffff - hba->prev_stats.stat.cnt); \
	hba->bfw_stats.cnt += val; \
} while (0)

/* bnx2fc driver uses only one instance of fcoe_percpu_s */
extern struct fcoe_percpu_s bnx2fc_global;

@@ -167,6 +179,14 @@ struct bnx2fc_percpu_s {
	spinlock_t fp_work_lock;
};

struct bnx2fc_fw_stats {
	u64 fc_crc_cnt;
	u64 fcoe_tx_pkt_cnt;
	u64 fcoe_rx_pkt_cnt;
	u64 fcoe_tx_byte_cnt;
	u64 fcoe_rx_byte_cnt;
};

struct bnx2fc_hba {
	struct list_head list;
	struct cnic_dev *cnic;

@@ -207,6 +227,8 @@ struct bnx2fc_hba {
	struct bnx2fc_rport **tgt_ofld_list;

	/* statistics */
	struct bnx2fc_fw_stats bfw_stats;
	struct fcoe_statistics_params prev_stats;
	struct fcoe_statistics_params *stats_buffer;
	dma_addr_t stats_buf_dma;
	struct completion stat_req_done;

@@ -280,6 +302,7 @@ struct bnx2fc_rport {
#define BNX2FC_FLAG_UPLD_REQ_COMPL 0x7
#define BNX2FC_FLAG_EXPL_LOGO 0x8
#define BNX2FC_FLAG_DISABLE_FAILED 0x9
#define BNX2FC_FLAG_ENABLED 0xa

	u8 src_addr[ETH_ALEN];
	u32 max_sqes;

@@ -468,6 +491,8 @@ int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba);
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba);
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
			struct bnx2fc_rport *tgt);
int bnx2fc_send_session_enable_req(struct fcoe_port *port,
			struct bnx2fc_rport *tgt);
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
			struct bnx2fc_rport *tgt);
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

#define DRV_MODULE_NAME "bnx2fc"
#define DRV_MODULE_VERSION BNX2FC_VERSION
#define DRV_MODULE_RELDATE "Jun 04, 2012"
#define DRV_MODULE_RELDATE "Dec 21, 2012"

static char version[] =

@@ -687,11 +687,16 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
	BNX2FC_HBA_DBG(lport, "FW stat req timed out\n");
	return bnx2fc_stats;
}
bnx2fc_stats->invalid_crc_count += fw_stats->rx_stat2.fc_crc_cnt;
bnx2fc_stats->tx_frames += fw_stats->tx_stat.fcoe_tx_pkt_cnt;
bnx2fc_stats->tx_words += (fw_stats->tx_stat.fcoe_tx_byte_cnt) / 4;
bnx2fc_stats->rx_frames += fw_stats->rx_stat0.fcoe_rx_pkt_cnt;
bnx2fc_stats->rx_words += (fw_stats->rx_stat0.fcoe_rx_byte_cnt) / 4;
BNX2FC_STATS(hba, rx_stat2, fc_crc_cnt);
bnx2fc_stats->invalid_crc_count += hba->bfw_stats.fc_crc_cnt;
BNX2FC_STATS(hba, tx_stat, fcoe_tx_pkt_cnt);
bnx2fc_stats->tx_frames += hba->bfw_stats.fcoe_tx_pkt_cnt;
BNX2FC_STATS(hba, tx_stat, fcoe_tx_byte_cnt);
bnx2fc_stats->tx_words += ((hba->bfw_stats.fcoe_tx_byte_cnt) / 4);
BNX2FC_STATS(hba, rx_stat0, fcoe_rx_pkt_cnt);
bnx2fc_stats->rx_frames += hba->bfw_stats.fcoe_rx_pkt_cnt;
BNX2FC_STATS(hba, rx_stat0, fcoe_rx_byte_cnt);
bnx2fc_stats->rx_words += ((hba->bfw_stats.fcoe_rx_byte_cnt) / 4);

bnx2fc_stats->dumped_frames = 0;
bnx2fc_stats->lip_count = 0;

@@ -700,6 +705,8 @@ static struct fc_host_statistics *bnx2fc_get_host_stats(struct Scsi_Host *shost)
bnx2fc_stats->loss_of_signal_count = 0;
bnx2fc_stats->prim_seq_protocol_err_count = 0;

memcpy(&hba->prev_stats, hba->stats_buffer,
	sizeof(struct fcoe_statistics_params));
return bnx2fc_stats;
}

@@ -2660,7 +2667,7 @@ static struct scsi_host_template bnx2fc_shost_template = {
.can_queue = BNX2FC_CAN_QUEUE,
.use_clustering = ENABLE_CLUSTERING,
.sg_tablesize = BNX2FC_MAX_BDS_PER_CMD,
.max_sectors = 512,
.max_sectors = 1024,
};

static struct libfc_function_template bnx2fc_libfc_fcn_templ = {
|
@ -347,7 +347,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
|
|||
* @port: port structure pointer
|
||||
* @tgt: bnx2fc_rport structure pointer
|
||||
*/
|
||||
static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
|
||||
int bnx2fc_send_session_enable_req(struct fcoe_port *port,
|
||||
struct bnx2fc_rport *tgt)
|
||||
{
|
||||
struct kwqe *kwqe_arr[2];
|
||||
|
@ -759,8 +759,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
|
|||
case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
|
||||
BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
|
||||
xid);
|
||||
memset(&io_req->err_entry, 0,
|
||||
sizeof(struct fcoe_err_report_entry));
|
||||
memcpy(&io_req->err_entry, err_entry,
|
||||
sizeof(struct fcoe_err_report_entry));
|
||||
if (!test_bit(BNX2FC_FLAG_SRR_SENT,
|
||||
|
@ -847,8 +845,6 @@ static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
|
|||
goto ret_warn_rqe;
|
||||
}
|
||||
|
||||
memset(&io_req->err_entry, 0,
|
||||
sizeof(struct fcoe_err_report_entry));
|
||||
memcpy(&io_req->err_entry, err_entry,
|
||||
sizeof(struct fcoe_err_report_entry));
|
||||
|
||||
|
@ -1124,7 +1120,6 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
|
|||
struct bnx2fc_interface *interface;
|
||||
u32 conn_id;
|
||||
u32 context_id;
|
||||
int rc;
|
||||
|
||||
conn_id = ofld_kcqe->fcoe_conn_id;
|
||||
context_id = ofld_kcqe->fcoe_conn_context_id;
|
||||
|
@ -1153,17 +1148,10 @@ static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
|
|||
"resources\n");
|
||||
set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
|
||||
}
|
||||
goto ofld_cmpl_err;
|
||||
} else {
|
||||
|
||||
/* now enable the session */
|
||||
rc = bnx2fc_send_session_enable_req(port, tgt);
|
||||
if (rc) {
|
||||
printk(KERN_ERR PFX "enable session failed\n");
|
||||
goto ofld_cmpl_err;
|
||||
}
|
||||
/* FW offload request successfully completed */
|
||||
set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
|
||||
}
|
||||
return;
|
||||
ofld_cmpl_err:
|
||||
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
|
||||
wake_up_interruptible(&tgt->ofld_wait);
|
||||
|
@ -1210,15 +1198,9 @@ static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
|
|||
printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
|
||||
goto enbl_cmpl_err;
|
||||
}
|
||||
if (ofld_kcqe->completion_status)
|
||||
goto enbl_cmpl_err;
|
||||
else {
|
||||
if (!ofld_kcqe->completion_status)
|
||||
/* enable successful - rport ready for issuing IOs */
|
||||
set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
|
||||
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
|
||||
wake_up_interruptible(&tgt->ofld_wait);
|
||||
}
|
||||
return;
|
||||
set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
|
||||
|
||||
enbl_cmpl_err:
|
||||
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
|
||||
|
@ -1251,6 +1233,7 @@ static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
|
|||
/* disable successful */
|
||||
BNX2FC_TGT_DBG(tgt, "disable successful\n");
|
||||
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
|
||||
clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
|
||||
set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
|
||||
set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
|
||||
wake_up_interruptible(&tgt->upld_wait);
|
||||
|
|
|
@ -654,7 +654,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
|
|||
mp_req->mp_resp_bd = dma_alloc_coherent(&hba->pcidev->dev, sz,
|
||||
&mp_req->mp_resp_bd_dma,
|
||||
GFP_ATOMIC);
|
||||
if (!mp_req->mp_req_bd) {
|
||||
if (!mp_req->mp_resp_bd) {
|
||||
printk(KERN_ERR PFX "unable to alloc MP resp bd\n");
|
||||
bnx2fc_free_mp_resc(io_req);
|
||||
return FAILED;
|
||||
|
@ -685,8 +685,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
|
|||
static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
|
||||
{
|
||||
struct fc_lport *lport;
|
||||
struct fc_rport *rport = starget_to_rport(scsi_target(sc_cmd->device));
|
||||
struct fc_rport_libfc_priv *rp = rport->dd_data;
|
||||
struct fc_rport *rport;
|
||||
struct fc_rport_libfc_priv *rp;
|
||||
struct fcoe_port *port;
|
||||
struct bnx2fc_interface *interface;
|
||||
struct bnx2fc_rport *tgt;
|
||||
|
@ -704,6 +704,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
|
|||
unsigned long start = jiffies;
|
||||
|
||||
lport = shost_priv(host);
|
||||
rport = starget_to_rport(scsi_target(sc_cmd->device));
|
||||
port = lport_priv(lport);
|
||||
interface = port->priv;
|
||||
|
||||
|
@ -712,6 +713,7 @@ static int bnx2fc_initiate_tmf(struct scsi_cmnd *sc_cmd, u8 tm_flags)
|
|||
rc = FAILED;
|
||||
goto tmf_err;
|
||||
}
|
||||
rp = rport->dd_data;
|
||||
|
||||
rc = fc_block_scsi_eh(sc_cmd);
|
||||
if (rc)
|
||||
|
|
|
@ -33,6 +33,7 @@ static void bnx2fc_upld_timer(unsigned long data)
|
|||
BNX2FC_TGT_DBG(tgt, "upld_timer - Upload compl not received!!\n");
|
||||
/* fake upload completion */
|
||||
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
|
||||
clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
|
||||
set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
|
||||
wake_up_interruptible(&tgt->upld_wait);
|
||||
}
|
||||
|
@ -55,10 +56,25 @@ static void bnx2fc_ofld_timer(unsigned long data)
|
|||
* resources are freed up in bnx2fc_offload_session
|
||||
*/
|
||||
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
|
||||
clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
|
||||
set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
|
||||
wake_up_interruptible(&tgt->ofld_wait);
|
||||
}
|
||||
|
||||
static void bnx2fc_ofld_wait(struct bnx2fc_rport *tgt)
|
||||
{
|
||||
setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
|
||||
mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
|
||||
|
||||
wait_event_interruptible(tgt->ofld_wait,
|
||||
(test_bit(
|
||||
BNX2FC_FLAG_OFLD_REQ_CMPL,
|
||||
&tgt->flags)));
|
||||
if (signal_pending(current))
|
||||
flush_signals(current);
|
||||
del_timer_sync(&tgt->ofld_timer);
|
||||
}
|
||||
|
||||
static void bnx2fc_offload_session(struct fcoe_port *port,
|
||||
struct bnx2fc_rport *tgt,
|
||||
struct fc_rport_priv *rdata)
|
||||
|
@ -103,17 +119,7 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
|
|||
* wait for the session is offloaded and enabled. 3 Secs
|
||||
* should be ample time for this process to complete.
|
||||
*/
|
||||
setup_timer(&tgt->ofld_timer, bnx2fc_ofld_timer, (unsigned long)tgt);
|
||||
mod_timer(&tgt->ofld_timer, jiffies + BNX2FC_FW_TIMEOUT);
|
||||
|
||||
wait_event_interruptible(tgt->ofld_wait,
|
||||
(test_bit(
|
||||
BNX2FC_FLAG_OFLD_REQ_CMPL,
|
||||
&tgt->flags)));
|
||||
if (signal_pending(current))
|
||||
flush_signals(current);
|
||||
|
||||
del_timer_sync(&tgt->ofld_timer);
|
||||
bnx2fc_ofld_wait(tgt);
|
||||
|
||||
if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
|
||||
if (test_and_clear_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE,
|
||||
|
@ -131,14 +137,23 @@ static void bnx2fc_offload_session(struct fcoe_port *port,
|
|||
}
|
||||
if (bnx2fc_map_doorbell(tgt)) {
|
||||
printk(KERN_ERR PFX "map doorbell failed - no mem\n");
|
||||
/* upload will take care of cleaning up sess resc */
|
||||
lport->tt.rport_logoff(rdata);
|
||||
goto ofld_err;
|
||||
}
|
||||
clear_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
|
||||
rval = bnx2fc_send_session_enable_req(port, tgt);
|
||||
if (rval) {
|
||||
pr_err(PFX "enable session failed\n");
|
||||
goto ofld_err;
|
||||
}
|
||||
bnx2fc_ofld_wait(tgt);
|
||||
if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)))
|
||||
goto ofld_err;
|
||||
return;
|
||||
|
||||
ofld_err:
|
||||
/* couldn't offload the session. log off from this rport */
|
||||
BNX2FC_TGT_DBG(tgt, "bnx2fc_offload_session - offload error\n");
|
||||
clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
|
||||
/* Free session resources */
|
||||
bnx2fc_free_session_resc(hba, tgt);
|
||||
tgt_init_err:
|
||||
|
@ -259,6 +274,19 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
|
|||
spin_unlock_bh(&tgt->tgt_lock);
|
||||
}
|
||||
|
||||
static void bnx2fc_upld_wait(struct bnx2fc_rport *tgt)
|
||||
{
|
||||
setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
|
||||
mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
|
||||
wait_event_interruptible(tgt->upld_wait,
|
||||
(test_bit(
|
||||
BNX2FC_FLAG_UPLD_REQ_COMPL,
|
||||
&tgt->flags)));
|
||||
if (signal_pending(current))
|
||||
flush_signals(current);
|
||||
del_timer_sync(&tgt->upld_timer);
|
||||
}
|
||||
|
||||
static void bnx2fc_upload_session(struct fcoe_port *port,
|
||||
struct bnx2fc_rport *tgt)
|
||||
{
|
||||
|
@ -279,19 +307,8 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
|
|||
* wait for upload to complete. 3 Secs
|
||||
* should be sufficient time for this process to complete.
|
||||
*/
|
||||
setup_timer(&tgt->upld_timer, bnx2fc_upld_timer, (unsigned long)tgt);
|
||||
mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
|
||||
|
||||
BNX2FC_TGT_DBG(tgt, "waiting for disable compl\n");
|
||||
wait_event_interruptible(tgt->upld_wait,
|
||||
(test_bit(
|
||||
BNX2FC_FLAG_UPLD_REQ_COMPL,
|
||||
&tgt->flags)));
|
||||
|
||||
if (signal_pending(current))
|
||||
flush_signals(current);
|
||||
|
||||
del_timer_sync(&tgt->upld_timer);
|
||||
bnx2fc_upld_wait(tgt);
|
||||
|
||||
/*
|
||||
* traverse thru the active_q and tmf_q and cleanup
|
||||
|
@ -308,24 +325,13 @@ static void bnx2fc_upload_session(struct fcoe_port *port,
|
|||
bnx2fc_send_session_destroy_req(hba, tgt);
|
||||
|
||||
/* wait for destroy to complete */
|
||||
setup_timer(&tgt->upld_timer,
|
||||
bnx2fc_upld_timer, (unsigned long)tgt);
|
||||
mod_timer(&tgt->upld_timer, jiffies + BNX2FC_FW_TIMEOUT);
|
||||
|
||||
wait_event_interruptible(tgt->upld_wait,
|
||||
(test_bit(
|
||||
BNX2FC_FLAG_UPLD_REQ_COMPL,
|
||||
&tgt->flags)));
|
||||
bnx2fc_upld_wait(tgt);
|
||||
|
||||
if (!(test_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags)))
|
||||
printk(KERN_ERR PFX "ERROR!! destroy timed out\n");
|
||||
|
||||
BNX2FC_TGT_DBG(tgt, "destroy wait complete flags = 0x%lx\n",
|
||||
tgt->flags);
|
||||
if (signal_pending(current))
|
||||
flush_signals(current);
|
||||
|
||||
del_timer_sync(&tgt->upld_timer);
|
||||
|
||||
} else if (test_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags)) {
|
||||
printk(KERN_ERR PFX "ERROR!! DISABLE req failed, destroy"
|
||||
|
@ -381,7 +387,9 @@ static int bnx2fc_init_tgt(struct bnx2fc_rport *tgt,
|
|||
tgt->rq_cons_idx = 0;
|
||||
atomic_set(&tgt->num_active_ios, 0);
|
||||
|
||||
if (rdata->flags & FC_RP_FLAGS_RETRY) {
|
||||
if (rdata->flags & FC_RP_FLAGS_RETRY &&
|
||||
rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET &&
|
||||
!(rdata->ids.roles & FC_RPORT_ROLE_FCP_INITIATOR)) {
|
||||
tgt->dev_type = TYPE_TAPE;
|
||||
tgt->io_timeout = 0; /* use default ULP timeout */
|
||||
} else {
|
||||
|
@ -479,7 +487,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
|
|||
tgt = (struct bnx2fc_rport *)&rp[1];
|
||||
|
||||
/* This can happen when ADISC finds the same target */
|
||||
if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
|
||||
if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
|
||||
BNX2FC_TGT_DBG(tgt, "already offloaded\n");
|
||||
mutex_unlock(&hba->hba_mutex);
|
||||
return;
|
||||
|
@ -494,11 +502,8 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
|
|||
BNX2FC_TGT_DBG(tgt, "OFFLOAD num_ofld_sess = %d\n",
|
||||
hba->num_ofld_sess);
|
||||
|
||||
if (test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags)) {
|
||||
/*
|
||||
* Session is offloaded and enabled. Map
|
||||
* doorbell register for this target
|
||||
*/
|
||||
if (test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags)) {
|
||||
/* Session is offloaded and enabled. */
|
||||
BNX2FC_TGT_DBG(tgt, "sess offloaded\n");
|
||||
/* This counter is protected with hba mutex */
|
||||
hba->num_ofld_sess++;
|
||||
|
@ -535,7 +540,7 @@ void bnx2fc_rport_event_handler(struct fc_lport *lport,
|
|||
*/
|
||||
tgt = (struct bnx2fc_rport *)&rp[1];
|
||||
|
||||
if (!(test_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags))) {
|
||||
if (!(test_bit(BNX2FC_FLAG_ENABLED, &tgt->flags))) {
|
||||
mutex_unlock(&hba->hba_mutex);
|
||||
break;
|
||||
}
|
||||
|
|
|
@ -1317,7 +1317,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
|
|||
(1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
|
||||
if (error_mask1) {
|
||||
iscsi_init2.error_bit_map[0] = error_mask1;
|
||||
mask64 &= (u32)(~mask64);
|
||||
mask64 ^= (u32)(mask64);
|
||||
mask64 |= error_mask1;
|
||||
} else
|
||||
iscsi_init2.error_bit_map[0] = (u32) mask64;
|
||||
|
|
@@ -2131,13 +2131,16 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
value_to_add = 4 - (cf->size % 4);

cfg_data = kzalloc(cf->size+value_to_add, GFP_KERNEL);
if (cfg_data == NULL)
	return -ENOMEM;
if (cfg_data == NULL) {
	ret = -ENOMEM;
	goto leave;
}

memcpy((void *)cfg_data, (const void *)cf->data, cf->size);

if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0)
	return -EINVAL;
if (csio_hw_check_fwconfig(hw, fw_cfg_param) != 0) {
	ret = -EINVAL;
	goto leave;
}

mtype = FW_PARAMS_PARAM_Y_GET(*fw_cfg_param);
maddr = FW_PARAMS_PARAM_Z_GET(*fw_cfg_param) << 16;

@@ -2149,9 +2152,9 @@ csio_hw_flash_config(struct csio_hw *hw, u32 *fw_cfg_param, char *path)
	strncpy(path, "/lib/firmware/" CSIO_CF_FNAME, 64);
}

leave:
kfree(cfg_data);
release_firmware(cf);

return ret;
}

@@ -60,13 +60,6 @@ static struct scsi_transport_template *csio_fcoe_transport_vport;
/*
 * debugfs support
 */
static int
csio_mem_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}

static ssize_t
csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{

@@ -110,7 +103,7 @@ csio_mem_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)

static const struct file_operations csio_mem_debugfs_fops = {
.owner = THIS_MODULE,
.open = csio_mem_open,
.open = simple_open,
.read = csio_mem_read,
.llseek = default_llseek,
};
@@ -1564,6 +1564,7 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
	break;
case CXGB4_STATE_DETACH:
	pr_info("cdev 0x%p, DETACH.\n", cdev);
	cxgbi_device_unregister(cdev);
	break;
default:
	pr_info("cdev 0x%p, unknown state %d.\n", cdev, state);
@ -495,7 +495,8 @@ void fnic_eth_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
|
|||
}
|
||||
|
||||
fnic_queue_wq_eth_desc(wq, skb, pa, skb->len,
|
||||
fnic->vlan_hw_insert, fnic->vlan_id, 1);
|
||||
0 /* hw inserts cos value */,
|
||||
fnic->vlan_id, 1);
|
||||
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
|
||||
}
|
||||
|
||||
|
@ -563,7 +564,8 @@ static int fnic_send_frame(struct fnic *fnic, struct fc_frame *fp)
|
|||
}
|
||||
|
||||
fnic_queue_wq_desc(wq, skb, pa, tot_len, fr_eof(fp),
|
||||
fnic->vlan_hw_insert, fnic->vlan_id, 1, 1, 1);
|
||||
0 /* hw inserts cos value */,
|
||||
fnic->vlan_id, 1, 1, 1);
|
||||
fnic_send_frame_end:
|
||||
spin_unlock_irqrestore(&fnic->wq_lock[0], flags);
|
||||
|
||||
|
|
@@ -1107,14 +1107,8 @@ static int gdth_init_pci(struct pci_dev *pdev, gdth_pci_str *pcistr,
pci_read_config_word(pdev, PCI_COMMAND, &command);
command |= 6;
pci_write_config_word(pdev, PCI_COMMAND, command);
if (pci_resource_start(pdev, 8) == 1UL)
	pci_resource_start(pdev, 8) = 0UL;
i = 0xFEFF0001UL;
pci_write_config_dword(pdev, PCI_ROM_ADDRESS, i);
gdth_delay(1);
pci_write_config_dword(pdev, PCI_ROM_ADDRESS,
	pci_resource_start(pdev, 8));

gdth_delay(1);

dp6m_ptr = ha->brd;

/* Ensure that it is safe to access the non HW portions of DPMEM.
drivers/scsi/ipr.c | 1296 changed lines (diff suppressed because it is too large)
|
@ -32,14 +32,15 @@
|
|||
#include <linux/libata.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/kref.h>
|
||||
#include <linux/blk-iopoll.h>
|
||||
#include <scsi/scsi.h>
|
||||
#include <scsi/scsi_cmnd.h>
|
||||
|
||||
/*
|
||||
* Literals
|
||||
*/
|
||||
#define IPR_DRIVER_VERSION "2.5.4"
|
||||
#define IPR_DRIVER_DATE "(July 11, 2012)"
|
||||
#define IPR_DRIVER_VERSION "2.6.0"
|
||||
#define IPR_DRIVER_DATE "(November 16, 2012)"
|
||||
|
||||
/*
|
||||
* IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
|
||||
|
@ -82,6 +83,7 @@
|
|||
|
||||
#define IPR_SUBS_DEV_ID_57B4 0x033B
|
||||
#define IPR_SUBS_DEV_ID_57B2 0x035F
|
||||
#define IPR_SUBS_DEV_ID_57C0 0x0352
|
||||
#define IPR_SUBS_DEV_ID_57C3 0x0353
|
||||
#define IPR_SUBS_DEV_ID_57C4 0x0354
|
||||
#define IPR_SUBS_DEV_ID_57C6 0x0357
|
||||
|
@ -94,6 +96,10 @@
|
|||
#define IPR_SUBS_DEV_ID_574D 0x0356
|
||||
#define IPR_SUBS_DEV_ID_57C8 0x035D
|
||||
|
||||
#define IPR_SUBS_DEV_ID_57D5 0x03FB
|
||||
#define IPR_SUBS_DEV_ID_57D6 0x03FC
|
||||
#define IPR_SUBS_DEV_ID_57D7 0x03FF
|
||||
#define IPR_SUBS_DEV_ID_57D8 0x03FE
|
||||
#define IPR_NAME "ipr"
|
||||
|
||||
/*
|
||||
|
@ -298,6 +304,9 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
|
|||
* Misc literals
|
||||
*/
|
||||
#define IPR_NUM_IOADL_ENTRIES IPR_MAX_SGLIST
|
||||
#define IPR_MAX_MSIX_VECTORS 0x5
|
||||
#define IPR_MAX_HRRQ_NUM 0x10
|
||||
#define IPR_INIT_HRRQ 0x0
|
||||
|
||||
/*
|
||||
* Adapter interface types
|
||||
|
@ -404,7 +413,7 @@ struct ipr_config_table_entry64 {
|
|||
__be64 dev_id;
|
||||
__be64 lun;
|
||||
__be64 lun_wwn[2];
|
||||
#define IPR_MAX_RES_PATH_LENGTH 24
|
||||
#define IPR_MAX_RES_PATH_LENGTH 48
|
||||
__be64 res_path;
|
||||
struct ipr_std_inq_data std_inq_data;
|
||||
u8 reserved2[4];
|
||||
|
@ -459,9 +468,39 @@ struct ipr_supported_device {
|
|||
u8 reserved2[16];
|
||||
}__attribute__((packed, aligned (4)));
|
||||
|
||||
struct ipr_hrr_queue {
|
||||
struct ipr_ioa_cfg *ioa_cfg;
|
||||
__be32 *host_rrq;
|
||||
dma_addr_t host_rrq_dma;
|
||||
#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
|
||||
#define IPR_HRRQ_RESP_BIT_SET 0x00000002
|
||||
#define IPR_HRRQ_TOGGLE_BIT 0x00000001
|
||||
#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
|
||||
#define IPR_ID_HRRQ_SELE_ENABLE 0x02
|
||||
volatile __be32 *hrrq_start;
|
||||
volatile __be32 *hrrq_end;
|
||||
volatile __be32 *hrrq_curr;
|
||||
|
||||
struct list_head hrrq_free_q;
|
||||
struct list_head hrrq_pending_q;
|
||||
spinlock_t _lock;
|
||||
spinlock_t *lock;
|
||||
|
||||
volatile u32 toggle_bit;
|
||||
u32 size;
|
||||
u32 min_cmd_id;
|
||||
u32 max_cmd_id;
|
||||
u8 allow_interrupts:1;
|
||||
u8 ioa_is_dead:1;
|
||||
u8 allow_cmds:1;
|
||||
|
||||
struct blk_iopoll iopoll;
|
||||
};
|
||||
|
||||
/* Command packet structure */
|
||||
struct ipr_cmd_pkt {
|
||||
__be16 reserved; /* Reserved by IOA */
|
||||
u8 reserved; /* Reserved by IOA */
|
||||
u8 hrrq_id;
|
||||
u8 request_type;
|
||||
#define IPR_RQTYPE_SCSICDB 0x00
|
||||
#define IPR_RQTYPE_IOACMD 0x01
|
||||
|
@ -1022,6 +1061,10 @@ struct ipr_hostrcb64_fabric_desc {
|
|||
struct ipr_hostrcb64_config_element elem[1];
|
||||
}__attribute__((packed, aligned (8)));
|
||||
|
||||
#define for_each_hrrq(hrrq, ioa_cfg) \
|
||||
for (hrrq = (ioa_cfg)->hrrq; \
|
||||
hrrq < ((ioa_cfg)->hrrq + (ioa_cfg)->hrrq_num); hrrq++)
|
||||
|
||||
#define for_each_fabric_cfg(fabric, cfg) \
|
||||
for (cfg = (fabric)->elem; \
|
||||
cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
|
||||
|
@ -1308,6 +1351,7 @@ struct ipr_chip_cfg_t {
|
|||
u16 max_cmds;
|
||||
u8 cache_line_size;
|
||||
u8 clear_isr;
|
||||
u32 iopoll_weight;
|
||||
struct ipr_interrupt_offsets regs;
|
||||
};
|
||||
|
||||
|
@ -1317,6 +1361,7 @@ struct ipr_chip_t {
|
|||
u16 intr_type;
|
||||
#define IPR_USE_LSI 0x00
|
||||
#define IPR_USE_MSI 0x01
|
||||
#define IPR_USE_MSIX 0x02
|
||||
u16 sis_type;
|
||||
#define IPR_SIS32 0x00
|
||||
#define IPR_SIS64 0x01
|
||||
|
@ -1375,13 +1420,10 @@ struct ipr_ioa_cfg {
|
|||
|
||||
struct list_head queue;
|
||||
|
||||
u8 allow_interrupts:1;
|
||||
u8 in_reset_reload:1;
|
||||
u8 in_ioa_bringdown:1;
|
||||
u8 ioa_unit_checked:1;
|
||||
u8 ioa_is_dead:1;
|
||||
u8 dump_taken:1;
|
||||
u8 allow_cmds:1;
|
||||
u8 allow_ml_add_del:1;
|
||||
u8 needs_hard_reset:1;
|
||||
u8 dual_raid:1;
|
||||
|
@ -1413,21 +1455,7 @@ struct ipr_ioa_cfg {
|
|||
char trace_start[8];
|
||||
#define IPR_TRACE_START_LABEL "trace"
|
||||
struct ipr_trace_entry *trace;
|
||||
u32 trace_index:IPR_NUM_TRACE_INDEX_BITS;
|
||||
|
||||
/*
|
||||
* Queue for free command blocks
|
||||
*/
|
||||
char ipr_free_label[8];
|
||||
#define IPR_FREEQ_LABEL "free-q"
|
||||
struct list_head free_q;
|
||||
|
||||
/*
|
||||
* Queue for command blocks outstanding to the adapter
|
||||
*/
|
||||
char ipr_pending_label[8];
|
||||
#define IPR_PENDQ_LABEL "pend-q"
|
||||
struct list_head pending_q;
|
||||
atomic_t trace_index;
|
||||
|
||||
char cfg_table_start[8];
|
||||
#define IPR_CFG_TBL_START "cfg"
|
||||
|
@ -1452,16 +1480,10 @@ struct ipr_ioa_cfg {
|
|||
struct list_head hostrcb_free_q;
|
||||
struct list_head hostrcb_pending_q;
|
||||
|
||||
__be32 *host_rrq;
|
||||
dma_addr_t host_rrq_dma;
|
||||
#define IPR_HRRQ_REQ_RESP_HANDLE_MASK 0xfffffffc
|
||||
#define IPR_HRRQ_RESP_BIT_SET 0x00000002
|
||||
#define IPR_HRRQ_TOGGLE_BIT 0x00000001
|
||||
#define IPR_HRRQ_REQ_RESP_HANDLE_SHIFT 2
|
||||
volatile __be32 *hrrq_start;
|
||||
volatile __be32 *hrrq_end;
|
||||
volatile __be32 *hrrq_curr;
|
||||
volatile u32 toggle_bit;
|
||||
struct ipr_hrr_queue hrrq[IPR_MAX_HRRQ_NUM];
|
||||
u32 hrrq_num;
|
||||
atomic_t hrrq_index;
|
||||
u16 identify_hrrq_index;
|
||||
|
||||
struct ipr_bus_attributes bus_attr[IPR_MAX_NUM_BUSES];
|
||||
|
||||
|
@ -1507,6 +1529,17 @@ struct ipr_ioa_cfg {
|
|||
u32 max_cmds;
|
||||
struct ipr_cmnd **ipr_cmnd_list;
|
||||
dma_addr_t *ipr_cmnd_list_dma;
|
||||
|
||||
u16 intr_flag;
|
||||
unsigned int nvectors;
|
||||
|
||||
struct {
|
||||
unsigned short vec;
|
||||
char desc[22];
|
||||
} vectors_info[IPR_MAX_MSIX_VECTORS];
|
||||
|
||||
u32 iopoll_weight;
|
||||
|
||||
}; /* struct ipr_ioa_cfg */
|
||||
|
||||
struct ipr_cmnd {
|
||||
|
@ -1544,6 +1577,7 @@ struct ipr_cmnd {
|
|||
struct scsi_device *sdev;
|
||||
} u;
|
||||
|
||||
struct ipr_hrr_queue *hrrq;
|
||||
struct ipr_ioa_cfg *ioa_cfg;
|
||||
};
|
||||
|
||||
|
@ -1717,7 +1751,8 @@ struct ipr_ucode_image_header {
|
|||
if (ipr_is_device(hostrcb)) { \
|
||||
if ((hostrcb)->ioa_cfg->sis64) { \
|
||||
printk(KERN_ERR IPR_NAME ": %s: " fmt, \
|
||||
ipr_format_res_path(hostrcb->hcam.u.error64.fd_res_path, \
|
||||
ipr_format_res_path(hostrcb->ioa_cfg, \
|
||||
hostrcb->hcam.u.error64.fd_res_path, \
|
||||
hostrcb->rp_buffer, \
|
||||
sizeof(hostrcb->rp_buffer)), \
|
||||
__VA_ARGS__); \
|
||||
|
|
|
@ -466,11 +466,13 @@ enum intr_type_t {
|
|||
MSIX,
|
||||
};
|
||||
|
||||
#define LPFC_CT_CTX_MAX 64
|
||||
struct unsol_rcv_ct_ctx {
|
||||
uint32_t ctxt_id;
|
||||
uint32_t SID;
|
||||
uint32_t flags;
|
||||
#define UNSOL_VALID 0x00000001
|
||||
uint32_t valid;
|
||||
#define UNSOL_INVALID 0
|
||||
#define UNSOL_VALID 1
|
||||
uint16_t oxid;
|
||||
uint16_t rxid;
|
||||
};
|
||||
|
@ -750,6 +752,15 @@ struct lpfc_hba {
|
|||
void __iomem *ctrl_regs_memmap_p;/* Kernel memory mapped address for
|
||||
PCI BAR2 */
|
||||
|
||||
void __iomem *pci_bar0_memmap_p; /* Kernel memory mapped address for
|
||||
PCI BAR0 with dual-ULP support */
|
||||
void __iomem *pci_bar2_memmap_p; /* Kernel memory mapped address for
|
||||
PCI BAR2 with dual-ULP support */
|
||||
void __iomem *pci_bar4_memmap_p; /* Kernel memory mapped address for
|
||||
PCI BAR4 with dual-ULP support */
|
||||
#define PCI_64BIT_BAR0 0
|
||||
#define PCI_64BIT_BAR2 2
|
||||
#define PCI_64BIT_BAR4 4
|
||||
void __iomem *MBslimaddr; /* virtual address for mbox cmds */
|
||||
void __iomem *HAregaddr; /* virtual address for host attn reg */
|
||||
void __iomem *CAregaddr; /* virtual address for chip attn reg */
|
||||
|
@ -938,7 +949,7 @@ struct lpfc_hba {
|
|||
|
||||
spinlock_t ct_ev_lock; /* synchronize access to ct_ev_waiters */
|
||||
struct list_head ct_ev_waiters;
|
||||
struct unsol_rcv_ct_ctx ct_ctx[64];
|
||||
struct unsol_rcv_ct_ctx ct_ctx[LPFC_CT_CTX_MAX];
|
||||
uint32_t ctx_idx;
|
||||
|
||||
uint8_t menlo_flag; /* menlo generic flags */
|
||||
|
|
|
@ -955,9 +955,9 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
spin_lock_irqsave(&phba->ct_ev_lock, flags);
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
evt_dat->immed_dat = phba->ctx_idx;
|
||||
phba->ctx_idx = (phba->ctx_idx + 1) % 64;
|
||||
phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
|
||||
/* Provide warning for over-run of the ct_ctx array */
|
||||
if (phba->ct_ctx[evt_dat->immed_dat].flags &
|
||||
if (phba->ct_ctx[evt_dat->immed_dat].valid ==
|
||||
UNSOL_VALID)
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
|
||||
"2717 CT context array entry "
|
||||
|
@ -973,7 +973,7 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
piocbq->iocb.unsli3.rcvsli3.ox_id;
|
||||
phba->ct_ctx[evt_dat->immed_dat].SID =
|
||||
piocbq->iocb.un.rcvels.remoteID;
|
||||
phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
|
||||
phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
|
||||
} else
|
||||
evt_dat->immed_dat = piocbq->iocb.ulpContext;
|
||||
|
||||
|
@ -1012,6 +1012,47 @@ lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @dmabuf: pointer to a dmabuf that describes the FC sequence
|
||||
*
|
||||
* This function handles abort to the CT command toward management plane
|
||||
* for SLI4 port.
|
||||
*
|
||||
* If the pending context of a CT command to management plane present, clears
|
||||
* such context and returns 1 for handled; otherwise, it returns 0 indicating
|
||||
* no context exists.
|
||||
**/
|
||||
int
|
||||
lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
|
||||
{
|
||||
struct fc_frame_header fc_hdr;
|
||||
struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
|
||||
int ctx_idx, handled = 0;
|
||||
uint16_t oxid, rxid;
|
||||
uint32_t sid;
|
||||
|
||||
memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
|
||||
sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
|
||||
oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
|
||||
rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
|
||||
|
||||
for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
|
||||
if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
|
||||
continue;
|
||||
if (phba->ct_ctx[ctx_idx].rxid != rxid)
|
||||
continue;
|
||||
if (phba->ct_ctx[ctx_idx].oxid != oxid)
|
||||
continue;
|
||||
if (phba->ct_ctx[ctx_idx].SID != sid)
|
||||
continue;
|
||||
phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
|
||||
handled = 1;
|
||||
}
|
||||
return handled;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
|
||||
* @job: SET_EVENT fc_bsg_job
|
||||
|
@ -1318,7 +1359,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
|
|||
icmd->ulpClass = CLASS3;
|
||||
if (phba->sli_rev == LPFC_SLI_REV4) {
|
||||
/* Do not issue unsol response if oxid not marked as valid */
|
||||
if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
|
||||
if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
|
||||
rc = IOCB_ERROR;
|
||||
goto issue_ct_rsp_exit;
|
||||
}
|
||||
|
@ -1352,7 +1393,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
|
|||
phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
|
||||
|
||||
/* The exchange is done, mark the entry as invalid */
|
||||
phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
|
||||
phba->ct_ctx[tag].valid = UNSOL_INVALID;
|
||||
} else
|
||||
icmd->ulpContext = (ushort) tag;
|
||||
|
||||
|
|
|
@ -164,8 +164,7 @@ void lpfc_hb_timeout_handler(struct lpfc_hba *);
|
|||
|
||||
void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
|
||||
struct lpfc_iocbq *);
|
||||
void lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
|
||||
struct lpfc_iocbq *);
|
||||
int lpfc_ct_handle_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
|
||||
int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t);
|
||||
int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int);
|
||||
void lpfc_fdmi_tmo(unsigned long);
|
||||
|
@ -427,6 +426,7 @@ int lpfc_bsg_request(struct fc_bsg_job *);
|
|||
int lpfc_bsg_timeout(struct fc_bsg_job *);
|
||||
int lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
|
||||
struct lpfc_iocbq *);
|
||||
int lpfc_bsg_ct_unsol_abort(struct lpfc_hba *, struct hbq_dmabuf *);
|
||||
void __lpfc_sli_ringtx_put(struct lpfc_hba *, struct lpfc_sli_ring *,
|
||||
struct lpfc_iocbq *);
|
||||
struct lpfc_iocbq *lpfc_sli_ringtx_get(struct lpfc_hba *,
|
||||
|
|
|
@ -164,37 +164,24 @@ lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_ct_abort_unsol_event - Default handle for sli4 unsol abort
|
||||
* lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
|
||||
* @phba: Pointer to HBA context object.
|
||||
* @pring: Pointer to the driver internal I/O ring.
|
||||
* @piocbq: Pointer to the IOCBQ.
|
||||
* @dmabuf: pointer to a dmabuf that describes the FC sequence
|
||||
*
|
||||
* This function serves as the default handler for the sli4 unsolicited
|
||||
* abort event. It shall be invoked when there is no application interface
|
||||
* registered unsolicited abort handler. This handler does nothing but
|
||||
* just simply releases the dma buffer used by the unsol abort event.
|
||||
* This function serves as the upper level protocol abort handler for CT
|
||||
* protocol.
|
||||
*
|
||||
* Return 1 if abort has been handled, 0 otherwise.
|
||||
**/
|
||||
void
|
||||
lpfc_sli4_ct_abort_unsol_event(struct lpfc_hba *phba,
|
||||
struct lpfc_sli_ring *pring,
|
||||
struct lpfc_iocbq *piocbq)
|
||||
int
|
||||
lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
|
||||
{
|
||||
IOCB_t *icmd = &piocbq->iocb;
|
||||
struct lpfc_dmabuf *bdeBuf;
|
||||
uint32_t size;
|
||||
int handled;
|
||||
|
||||
/* Forward abort event to any process registered to receive ct event */
|
||||
if (lpfc_bsg_ct_unsol_event(phba, pring, piocbq) == 0)
|
||||
return;
|
||||
/* CT upper level goes through BSG */
|
||||
handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);
|
||||
|
||||
/* If there is no BDE associated with IOCB, there is nothing to do */
|
||||
if (icmd->ulpBdeCount == 0)
|
||||
return;
|
||||
bdeBuf = piocbq->context2;
|
||||
piocbq->context2 = NULL;
|
||||
size = icmd->un.cont64[0].tus.f.bdeSize;
|
||||
lpfc_ct_unsol_buffer(phba, piocbq, bdeBuf, size);
|
||||
lpfc_in_buf_free(phba, bdeBuf);
|
||||
return handled;
|
||||
}
|
||||
|
||||
static void
|
||||
|
|
|
@ -3122,6 +3122,13 @@ lpfc_els_retry(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
|
|||
|
||||
case IOERR_SEQUENCE_TIMEOUT:
|
||||
case IOERR_INVALID_RPI:
|
||||
if (cmd == ELS_CMD_PLOGI &&
|
||||
did == NameServer_DID) {
|
||||
/* Continue forever if plogi to */
|
||||
/* the nameserver fails */
|
||||
maxretry = 0;
|
||||
delay = 100;
|
||||
}
|
||||
retry = 1;
|
||||
break;
|
||||
}
|
||||
|
@ -6517,7 +6524,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
struct lpfc_nodelist *ndlp;
|
||||
struct ls_rjt stat;
|
||||
uint32_t *payload;
|
||||
uint32_t cmd, did, newnode, rjt_err = 0;
|
||||
uint32_t cmd, did, newnode;
|
||||
uint8_t rjt_exp, rjt_err = 0;
|
||||
IOCB_t *icmd = &elsiocb->iocb;
|
||||
|
||||
if (!vport || !(elsiocb->context2))
|
||||
|
@ -6606,12 +6614,14 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
/* If Nport discovery is delayed, reject PLOGIs */
|
||||
if (vport->fc_flag & FC_DISC_DELAYED) {
|
||||
rjt_err = LSRJT_UNABLE_TPC;
|
||||
rjt_exp = LSEXP_NOTHING_MORE;
|
||||
break;
|
||||
}
|
||||
if (vport->port_state < LPFC_DISC_AUTH) {
|
||||
if (!(phba->pport->fc_flag & FC_PT2PT) ||
|
||||
(phba->pport->fc_flag & FC_PT2PT_PLOGI)) {
|
||||
rjt_err = LSRJT_UNABLE_TPC;
|
||||
rjt_exp = LSEXP_NOTHING_MORE;
|
||||
break;
|
||||
}
|
||||
/* We get here, and drop thru, if we are PT2PT with
|
||||
|
@ -6648,6 +6658,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
lpfc_send_els_event(vport, ndlp, payload);
|
||||
if (vport->port_state < LPFC_DISC_AUTH) {
|
||||
rjt_err = LSRJT_UNABLE_TPC;
|
||||
rjt_exp = LSEXP_NOTHING_MORE;
|
||||
break;
|
||||
}
|
||||
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_LOGO);
|
||||
|
@ -6661,6 +6672,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
lpfc_send_els_event(vport, ndlp, payload);
|
||||
if (vport->port_state < LPFC_DISC_AUTH) {
|
||||
rjt_err = LSRJT_UNABLE_TPC;
|
||||
rjt_exp = LSEXP_NOTHING_MORE;
|
||||
break;
|
||||
}
|
||||
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLO);
|
||||
|
@ -6680,6 +6692,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
phba->fc_stat.elsRcvADISC++;
|
||||
if (vport->port_state < LPFC_DISC_AUTH) {
|
||||
rjt_err = LSRJT_UNABLE_TPC;
|
||||
rjt_exp = LSEXP_NOTHING_MORE;
|
||||
break;
|
||||
}
|
||||
lpfc_disc_state_machine(vport, ndlp, elsiocb,
|
||||
|
@ -6693,6 +6706,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
phba->fc_stat.elsRcvPDISC++;
|
||||
if (vport->port_state < LPFC_DISC_AUTH) {
|
||||
rjt_err = LSRJT_UNABLE_TPC;
|
||||
rjt_exp = LSEXP_NOTHING_MORE;
|
||||
break;
|
||||
}
|
||||
lpfc_disc_state_machine(vport, ndlp, elsiocb,
|
||||
|
@ -6730,6 +6744,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
phba->fc_stat.elsRcvPRLI++;
|
||||
if (vport->port_state < LPFC_DISC_AUTH) {
|
||||
rjt_err = LSRJT_UNABLE_TPC;
|
||||
rjt_exp = LSEXP_NOTHING_MORE;
|
||||
break;
|
||||
}
|
||||
lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PRLI);
|
||||
|
@ -6813,6 +6828,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
if (newnode)
|
||||
lpfc_nlp_put(ndlp);
|
||||
break;
|
||||
case ELS_CMD_REC:
|
||||
/* receive this due to exchange closed */
|
||||
rjt_err = LSRJT_UNABLE_TPC;
|
||||
rjt_exp = LSEXP_INVALID_OX_RX;
|
||||
break;
|
||||
default:
|
||||
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
|
||||
"RCV ELS cmd: cmd:x%x did:x%x/ste:x%x",
|
||||
|
@ -6820,6 +6840,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
|
||||
/* Unsupported ELS command, reject */
|
||||
rjt_err = LSRJT_CMD_UNSUPPORTED;
|
||||
rjt_exp = LSEXP_NOTHING_MORE;
|
||||
|
||||
/* Unknown ELS command <elsCmd> received from NPORT <did> */
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
|
||||
|
@ -6834,7 +6855,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
|
|||
if (rjt_err) {
|
||||
memset(&stat, 0, sizeof(stat));
|
||||
stat.un.b.lsRjtRsnCode = rjt_err;
|
||||
stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE;
|
||||
stat.un.b.lsRjtRsnCodeExp = rjt_exp;
|
||||
lpfc_els_rsp_reject(vport, stat.un.lsRjtError, elsiocb, ndlp,
|
||||
NULL);
|
||||
}
|
||||
|
|
|
@ -538,6 +538,7 @@ struct fc_vft_header {
|
|||
#define ELS_CMD_ECHO 0x10000000
|
||||
#define ELS_CMD_TEST 0x11000000
|
||||
#define ELS_CMD_RRQ 0x12000000
|
||||
#define ELS_CMD_REC 0x13000000
|
||||
#define ELS_CMD_PRLI 0x20100014
|
||||
#define ELS_CMD_PRLO 0x21100014
|
||||
#define ELS_CMD_PRLO_ACC 0x02100014
|
||||
|
@ -574,6 +575,7 @@ struct fc_vft_header {
|
|||
#define ELS_CMD_ECHO 0x10
|
||||
#define ELS_CMD_TEST 0x11
|
||||
#define ELS_CMD_RRQ 0x12
|
||||
#define ELS_CMD_REC 0x13
|
||||
#define ELS_CMD_PRLI 0x14001020
|
||||
#define ELS_CMD_PRLO 0x14001021
|
||||
#define ELS_CMD_PRLO_ACC 0x14001002
|
||||
|
|
|
@ -106,6 +106,7 @@ struct lpfc_sli_intf {
|
|||
|
||||
#define LPFC_SLI4_MB_WORD_COUNT 64
|
||||
#define LPFC_MAX_MQ_PAGE 8
|
||||
#define LPFC_MAX_WQ_PAGE_V0 4
|
||||
#define LPFC_MAX_WQ_PAGE 8
|
||||
#define LPFC_MAX_CQ_PAGE 4
|
||||
#define LPFC_MAX_EQ_PAGE 8
|
||||
|
@ -703,24 +704,41 @@ struct lpfc_register {
|
|||
* BAR0. The offsets are the same so the driver must account for
|
||||
* any base address difference.
|
||||
*/
|
||||
#define LPFC_RQ_DOORBELL 0x00A0
|
||||
#define lpfc_rq_doorbell_num_posted_SHIFT 16
|
||||
#define lpfc_rq_doorbell_num_posted_MASK 0x3FFF
|
||||
#define lpfc_rq_doorbell_num_posted_WORD word0
|
||||
#define lpfc_rq_doorbell_id_SHIFT 0
|
||||
#define lpfc_rq_doorbell_id_MASK 0xFFFF
|
||||
#define lpfc_rq_doorbell_id_WORD word0
|
||||
#define LPFC_ULP0_RQ_DOORBELL 0x00A0
|
||||
#define LPFC_ULP1_RQ_DOORBELL 0x00C0
|
||||
#define lpfc_rq_db_list_fm_num_posted_SHIFT 24
|
||||
#define lpfc_rq_db_list_fm_num_posted_MASK 0x00FF
|
||||
#define lpfc_rq_db_list_fm_num_posted_WORD word0
|
||||
#define lpfc_rq_db_list_fm_index_SHIFT 16
|
||||
#define lpfc_rq_db_list_fm_index_MASK 0x00FF
|
||||
#define lpfc_rq_db_list_fm_index_WORD word0
|
||||
#define lpfc_rq_db_list_fm_id_SHIFT 0
|
||||
#define lpfc_rq_db_list_fm_id_MASK 0xFFFF
|
||||
#define lpfc_rq_db_list_fm_id_WORD word0
|
||||
#define lpfc_rq_db_ring_fm_num_posted_SHIFT 16
|
||||
#define lpfc_rq_db_ring_fm_num_posted_MASK 0x3FFF
|
||||
#define lpfc_rq_db_ring_fm_num_posted_WORD word0
|
||||
#define lpfc_rq_db_ring_fm_id_SHIFT 0
|
||||
#define lpfc_rq_db_ring_fm_id_MASK 0xFFFF
|
||||
#define lpfc_rq_db_ring_fm_id_WORD word0
|
||||
|
||||
#define LPFC_WQ_DOORBELL 0x0040
|
||||
#define lpfc_wq_doorbell_num_posted_SHIFT 24
|
||||
#define lpfc_wq_doorbell_num_posted_MASK 0x00FF
|
||||
#define lpfc_wq_doorbell_num_posted_WORD word0
|
||||
#define lpfc_wq_doorbell_index_SHIFT 16
|
||||
#define lpfc_wq_doorbell_index_MASK 0x00FF
|
||||
#define lpfc_wq_doorbell_index_WORD word0
|
||||
#define lpfc_wq_doorbell_id_SHIFT 0
|
||||
#define lpfc_wq_doorbell_id_MASK 0xFFFF
|
||||
#define lpfc_wq_doorbell_id_WORD word0
|
||||
#define LPFC_ULP0_WQ_DOORBELL 0x0040
|
||||
#define LPFC_ULP1_WQ_DOORBELL 0x0060
|
||||
#define lpfc_wq_db_list_fm_num_posted_SHIFT 24
|
||||
#define lpfc_wq_db_list_fm_num_posted_MASK 0x00FF
|
||||
#define lpfc_wq_db_list_fm_num_posted_WORD word0
|
||||
#define lpfc_wq_db_list_fm_index_SHIFT 16
|
||||
#define lpfc_wq_db_list_fm_index_MASK 0x00FF
|
||||
#define lpfc_wq_db_list_fm_index_WORD word0
|
||||
#define lpfc_wq_db_list_fm_id_SHIFT 0
|
||||
#define lpfc_wq_db_list_fm_id_MASK 0xFFFF
|
||||
#define lpfc_wq_db_list_fm_id_WORD word0
|
||||
#define lpfc_wq_db_ring_fm_num_posted_SHIFT 16
|
||||
#define lpfc_wq_db_ring_fm_num_posted_MASK 0x3FFF
|
||||
#define lpfc_wq_db_ring_fm_num_posted_WORD word0
|
||||
#define lpfc_wq_db_ring_fm_id_SHIFT 0
|
||||
#define lpfc_wq_db_ring_fm_id_MASK 0xFFFF
|
||||
#define lpfc_wq_db_ring_fm_id_WORD word0
|
||||
|
||||
#define LPFC_EQCQ_DOORBELL 0x0120
|
||||
#define lpfc_eqcq_doorbell_se_SHIFT 31
|
||||
|
@ -1131,12 +1149,22 @@ struct lpfc_mbx_wq_create {
|
|||
struct { /* Version 0 Request */
|
||||
uint32_t word0;
|
||||
#define lpfc_mbx_wq_create_num_pages_SHIFT 0
|
||||
#define lpfc_mbx_wq_create_num_pages_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_wq_create_num_pages_MASK 0x000000FF
|
||||
#define lpfc_mbx_wq_create_num_pages_WORD word0
|
||||
#define lpfc_mbx_wq_create_dua_SHIFT 8
|
||||
#define lpfc_mbx_wq_create_dua_MASK 0x00000001
|
||||
#define lpfc_mbx_wq_create_dua_WORD word0
|
||||
#define lpfc_mbx_wq_create_cq_id_SHIFT 16
|
||||
#define lpfc_mbx_wq_create_cq_id_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_wq_create_cq_id_WORD word0
|
||||
struct dma_address page[LPFC_MAX_WQ_PAGE];
|
||||
struct dma_address page[LPFC_MAX_WQ_PAGE_V0];
|
||||
uint32_t word9;
|
||||
#define lpfc_mbx_wq_create_bua_SHIFT 0
|
||||
#define lpfc_mbx_wq_create_bua_MASK 0x00000001
|
||||
#define lpfc_mbx_wq_create_bua_WORD word9
|
||||
#define lpfc_mbx_wq_create_ulp_num_SHIFT 8
|
||||
#define lpfc_mbx_wq_create_ulp_num_MASK 0x000000FF
|
||||
#define lpfc_mbx_wq_create_ulp_num_WORD word9
|
||||
} request;
|
||||
struct { /* Version 1 Request */
|
||||
uint32_t word0; /* Word 0 is the same as in v0 */
|
||||
|
@ -1160,6 +1188,17 @@ struct lpfc_mbx_wq_create {
|
|||
#define lpfc_mbx_wq_create_q_id_SHIFT 0
|
||||
#define lpfc_mbx_wq_create_q_id_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_wq_create_q_id_WORD word0
|
||||
uint32_t doorbell_offset;
|
||||
uint32_t word2;
|
||||
#define lpfc_mbx_wq_create_bar_set_SHIFT 0
|
||||
#define lpfc_mbx_wq_create_bar_set_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_wq_create_bar_set_WORD word2
|
||||
#define WQ_PCI_BAR_0_AND_1 0x00
|
||||
#define WQ_PCI_BAR_2_AND_3 0x01
|
||||
#define WQ_PCI_BAR_4_AND_5 0x02
|
||||
#define lpfc_mbx_wq_create_db_format_SHIFT 16
|
||||
#define lpfc_mbx_wq_create_db_format_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_wq_create_db_format_WORD word2
|
||||
} response;
|
||||
} u;
|
||||
};
|
||||
|
@ -1223,14 +1262,31 @@ struct lpfc_mbx_rq_create {
|
|||
#define lpfc_mbx_rq_create_num_pages_SHIFT 0
|
||||
#define lpfc_mbx_rq_create_num_pages_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rq_create_num_pages_WORD word0
|
||||
#define lpfc_mbx_rq_create_dua_SHIFT 16
|
||||
#define lpfc_mbx_rq_create_dua_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_create_dua_WORD word0
|
||||
#define lpfc_mbx_rq_create_bqu_SHIFT 17
|
||||
#define lpfc_mbx_rq_create_bqu_MASK 0x00000001
|
||||
#define lpfc_mbx_rq_create_bqu_WORD word0
|
||||
#define lpfc_mbx_rq_create_ulp_num_SHIFT 24
|
||||
#define lpfc_mbx_rq_create_ulp_num_MASK 0x000000FF
|
||||
#define lpfc_mbx_rq_create_ulp_num_WORD word0
|
||||
struct rq_context context;
|
||||
struct dma_address page[LPFC_MAX_WQ_PAGE];
|
||||
} request;
|
||||
struct {
|
||||
uint32_t word0;
|
||||
#define lpfc_mbx_rq_create_q_id_SHIFT 0
|
||||
#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rq_create_q_id_WORD word0
|
||||
#define lpfc_mbx_rq_create_q_id_SHIFT 0
|
||||
#define lpfc_mbx_rq_create_q_id_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rq_create_q_id_WORD word0
|
||||
uint32_t doorbell_offset;
|
||||
uint32_t word2;
|
||||
#define lpfc_mbx_rq_create_bar_set_SHIFT 0
|
||||
#define lpfc_mbx_rq_create_bar_set_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rq_create_bar_set_WORD word2
|
||||
#define lpfc_mbx_rq_create_db_format_SHIFT 16
|
||||
#define lpfc_mbx_rq_create_db_format_MASK 0x0000FFFF
|
||||
#define lpfc_mbx_rq_create_db_format_WORD word2
|
||||
} response;
|
||||
} u;
|
||||
};
|
||||
|
@ -1388,6 +1444,33 @@ struct lpfc_mbx_get_rsrc_extent_info {
|
|||
} u;
|
||||
};
|
||||
|
||||
struct lpfc_mbx_query_fw_config {
|
||||
struct mbox_header header;
|
||||
struct {
|
||||
uint32_t config_number;
|
||||
#define LPFC_FC_FCOE 0x00000007
|
||||
uint32_t asic_revision;
|
||||
uint32_t physical_port;
|
||||
uint32_t function_mode;
|
||||
#define LPFC_FCOE_INI_MODE 0x00000040
|
||||
#define LPFC_FCOE_TGT_MODE 0x00000080
|
||||
#define LPFC_DUA_MODE 0x00000800
|
||||
uint32_t ulp0_mode;
|
||||
#define LPFC_ULP_FCOE_INIT_MODE 0x00000040
|
||||
#define LPFC_ULP_FCOE_TGT_MODE 0x00000080
|
||||
uint32_t ulp0_nap_words[12];
|
||||
uint32_t ulp1_mode;
|
||||
uint32_t ulp1_nap_words[12];
|
||||
uint32_t function_capabilities;
|
||||
uint32_t cqid_base;
|
||||
uint32_t cqid_tot;
|
||||
uint32_t eqid_base;
|
||||
uint32_t eqid_tot;
|
||||
uint32_t ulp0_nap2_words[2];
|
||||
uint32_t ulp1_nap2_words[2];
|
||||
} rsp;
|
||||
};
|
||||
|
||||
struct lpfc_id_range {
|
||||
uint32_t word5;
|
||||
#define lpfc_mbx_rsrc_id_word4_0_SHIFT 0
|
||||
|
@ -1803,51 +1886,6 @@ struct lpfc_mbx_redisc_fcf_tbl {
|
|||
#define lpfc_mbx_redisc_fcf_index_WORD word12
|
||||
};
|
||||
|
||||
struct lpfc_mbx_query_fw_cfg {
|
||||
struct mbox_header header;
|
||||
uint32_t config_number;
|
||||
uint32_t asic_rev;
|
||||
uint32_t phys_port;
|
||||
uint32_t function_mode;
|
||||
/* firmware Function Mode */
|
||||
#define lpfc_function_mode_toe_SHIFT 0
|
||||
#define lpfc_function_mode_toe_MASK 0x00000001
|
||||
#define lpfc_function_mode_toe_WORD function_mode
|
||||
#define lpfc_function_mode_nic_SHIFT 1
|
||||
#define lpfc_function_mode_nic_MASK 0x00000001
|
||||
#define lpfc_function_mode_nic_WORD function_mode
|
||||
#define lpfc_function_mode_rdma_SHIFT 2
|
||||
#define lpfc_function_mode_rdma_MASK 0x00000001
|
||||
#define lpfc_function_mode_rdma_WORD function_mode
|
||||
#define lpfc_function_mode_vm_SHIFT 3
|
||||
#define lpfc_function_mode_vm_MASK 0x00000001
|
||||
#define lpfc_function_mode_vm_WORD function_mode
|
||||
#define lpfc_function_mode_iscsi_i_SHIFT 4
|
||||
#define lpfc_function_mode_iscsi_i_MASK 0x00000001
|
||||
#define lpfc_function_mode_iscsi_i_WORD function_mode
|
||||
#define lpfc_function_mode_iscsi_t_SHIFT 5
|
||||
#define lpfc_function_mode_iscsi_t_MASK 0x00000001
|
||||
#define lpfc_function_mode_iscsi_t_WORD function_mode
|
||||
#define lpfc_function_mode_fcoe_i_SHIFT 6
|
||||
#define lpfc_function_mode_fcoe_i_MASK 0x00000001
|
||||
#define lpfc_function_mode_fcoe_i_WORD function_mode
|
||||
#define lpfc_function_mode_fcoe_t_SHIFT 7
|
||||
#define lpfc_function_mode_fcoe_t_MASK 0x00000001
|
||||
#define lpfc_function_mode_fcoe_t_WORD function_mode
|
||||
#define lpfc_function_mode_dal_SHIFT 8
|
||||
#define lpfc_function_mode_dal_MASK 0x00000001
|
||||
#define lpfc_function_mode_dal_WORD function_mode
|
||||
#define lpfc_function_mode_lro_SHIFT 9
|
||||
#define lpfc_function_mode_lro_MASK 0x00000001
|
||||
#define lpfc_function_mode_lro_WORD function_mode
|
||||
#define lpfc_function_mode_flex10_SHIFT 10
|
||||
#define lpfc_function_mode_flex10_MASK 0x00000001
|
||||
#define lpfc_function_mode_flex10_WORD function_mode
|
||||
#define lpfc_function_mode_ncsi_SHIFT 11
|
||||
#define lpfc_function_mode_ncsi_MASK 0x00000001
|
||||
#define lpfc_function_mode_ncsi_WORD function_mode
|
||||
};
|
||||
|
||||
/* Status field for embedded SLI_CONFIG mailbox command */
|
||||
#define STATUS_SUCCESS 0x0
|
||||
#define STATUS_FAILED 0x1
|
||||
|
@ -2965,7 +3003,7 @@ struct lpfc_mqe {
|
|||
struct lpfc_mbx_read_config rd_config;
|
||||
struct lpfc_mbx_request_features req_ftrs;
|
||||
struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
|
||||
struct lpfc_mbx_query_fw_cfg query_fw_cfg;
|
||||
struct lpfc_mbx_query_fw_config query_fw_cfg;
|
||||
struct lpfc_mbx_supp_pages supp_pages;
|
||||
struct lpfc_mbx_pc_sli4_params sli4_params;
|
||||
struct lpfc_mbx_get_sli4_parameters get_sli4_parameters;
|
||||
|
|
|
@@ -6229,9 +6229,11 @@ lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba, uint32_t if_type)
phba->sli4_hba.conf_regs_memmap_p +
LPFC_CTL_PORT_SEM_OFFSET;
phba->sli4_hba.RQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_RQ_DOORBELL;
phba->sli4_hba.conf_regs_memmap_p +
LPFC_ULP0_RQ_DOORBELL;
phba->sli4_hba.WQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_WQ_DOORBELL;
phba->sli4_hba.conf_regs_memmap_p +
LPFC_ULP0_WQ_DOORBELL;
phba->sli4_hba.EQCQDBregaddr =
phba->sli4_hba.conf_regs_memmap_p + LPFC_EQCQ_DOORBELL;
phba->sli4_hba.MQDBregaddr =
@@ -6285,9 +6287,11 @@ lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
return -ENODEV;

phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
vf * LPFC_VFR_PAGE_SIZE +
LPFC_ULP0_RQ_DOORBELL);
phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
vf * LPFC_VFR_PAGE_SIZE +
LPFC_ULP0_WQ_DOORBELL);
phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
@ -6983,6 +6987,19 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
|
|||
phba->sli4_hba.fcp_wq = NULL;
|
||||
}
|
||||
|
||||
if (phba->pci_bar0_memmap_p) {
|
||||
iounmap(phba->pci_bar0_memmap_p);
|
||||
phba->pci_bar0_memmap_p = NULL;
|
||||
}
|
||||
if (phba->pci_bar2_memmap_p) {
|
||||
iounmap(phba->pci_bar2_memmap_p);
|
||||
phba->pci_bar2_memmap_p = NULL;
|
||||
}
|
||||
if (phba->pci_bar4_memmap_p) {
|
||||
iounmap(phba->pci_bar4_memmap_p);
|
||||
phba->pci_bar4_memmap_p = NULL;
|
||||
}
|
||||
|
||||
/* Release FCP CQ mapping array */
|
||||
if (phba->sli4_hba.fcp_cq_map != NULL) {
|
||||
kfree(phba->sli4_hba.fcp_cq_map);
|
||||
|
@ -7046,6 +7063,53 @@ lpfc_sli4_queue_setup(struct lpfc_hba *phba)
|
|||
int rc = -ENOMEM;
|
||||
int fcp_eqidx, fcp_cqidx, fcp_wqidx;
|
||||
int fcp_cq_index = 0;
|
||||
uint32_t shdr_status, shdr_add_status;
|
||||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
LPFC_MBOXQ_t *mboxq;
|
||||
uint32_t length;
|
||||
|
||||
/* Check for dual-ULP support */
|
||||
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!mboxq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3249 Unable to allocate memory for "
|
||||
"QUERY_FW_CFG mailbox command\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
length = (sizeof(struct lpfc_mbx_query_fw_config) -
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||||
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
|
||||
LPFC_MBOX_OPCODE_QUERY_FW_CFG,
|
||||
length, LPFC_SLI4_MBX_EMBED);
|
||||
|
||||
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
||||
|
||||
shdr = (union lpfc_sli4_cfg_shdr *)
|
||||
&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
|
||||
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
||||
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
|
||||
if (shdr_status || shdr_add_status || rc) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3250 QUERY_FW_CFG mailbox failed with status "
|
||||
"x%x add_status x%x, mbx status x%x\n",
|
||||
shdr_status, shdr_add_status, rc);
|
||||
if (rc != MBX_TIMEOUT)
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
rc = -ENXIO;
|
||||
goto out_error;
|
||||
}
|
||||
|
||||
phba->sli4_hba.fw_func_mode =
|
||||
mboxq->u.mqe.un.query_fw_cfg.rsp.function_mode;
|
||||
phba->sli4_hba.ulp0_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp0_mode;
|
||||
phba->sli4_hba.ulp1_mode = mboxq->u.mqe.un.query_fw_cfg.rsp.ulp1_mode;
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
"3251 QUERY_FW_CFG: func_mode:x%x, ulp0_mode:x%x, "
|
||||
"ulp1_mode:x%x\n", phba->sli4_hba.fw_func_mode,
|
||||
phba->sli4_hba.ulp0_mode, phba->sli4_hba.ulp1_mode);
|
||||
|
||||
if (rc != MBX_TIMEOUT)
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
|
||||
/*
|
||||
* Set up HBA Event Queues (EQs)
|
||||
|
@ -7659,78 +7723,6 @@ lpfc_pci_function_reset(struct lpfc_hba *phba)
|
|||
return rc;
|
||||
}
|
||||
|
||||
/**
* lpfc_sli4_send_nop_mbox_cmds - Send sli-4 nop mailbox commands
* @phba: pointer to lpfc hba data structure.
* @cnt: number of nop mailbox commands to send.
*
* This routine is invoked to send a number @cnt of NOP mailbox commands and
* wait for each command to complete.
*
* Return: the number of NOP mailbox commands completed.
**/
static int
|
||||
lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
|
||||
{
|
||||
LPFC_MBOXQ_t *mboxq;
|
||||
int length, cmdsent;
|
||||
uint32_t mbox_tmo;
|
||||
uint32_t rc = 0;
|
||||
uint32_t shdr_status, shdr_add_status;
|
||||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
|
||||
if (cnt == 0) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||||
"2518 Requested to send 0 NOP mailbox cmd\n");
|
||||
return cnt;
|
||||
}
|
||||
|
||||
mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
|
||||
if (!mboxq) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"2519 Unable to allocate memory for issuing "
|
||||
"NOP mailbox command\n");
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Set up NOP SLI4_CONFIG mailbox-ioctl command */
|
||||
length = (sizeof(struct lpfc_mbx_nop) -
|
||||
sizeof(struct lpfc_sli4_cfg_mhdr));
|
||||
|
||||
for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
|
||||
lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
|
||||
LPFC_MBOX_OPCODE_NOP, length,
|
||||
LPFC_SLI4_MBX_EMBED);
|
||||
if (!phba->sli4_hba.intr_enable)
|
||||
rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
|
||||
else {
|
||||
mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
|
||||
rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
|
||||
}
|
||||
if (rc == MBX_TIMEOUT)
|
||||
break;
|
||||
/* Check return status */
|
||||
shdr = (union lpfc_sli4_cfg_shdr *)
|
||||
&mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
|
||||
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
||||
shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
|
||||
&shdr->response);
|
||||
if (shdr_status || shdr_add_status || rc) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
|
||||
"2520 NOP mailbox command failed "
|
||||
"status x%x add_status x%x mbx "
|
||||
"status x%x\n", shdr_status,
|
||||
shdr_add_status, rc);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (rc != MBX_TIMEOUT)
|
||||
mempool_free(mboxq, phba->mbox_mem_pool);
|
||||
|
||||
return cmdsent;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
|
@ -8498,37 +8490,6 @@ lpfc_unset_hba(struct lpfc_hba *phba)
|
|||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
|
||||
* @phba: pointer to lpfc hba data structure.
|
||||
*
|
||||
* This routine is invoked to unset the HBA device initialization steps to
|
||||
* a device with SLI-4 interface spec.
|
||||
**/
|
||||
static void
|
||||
lpfc_sli4_unset_hba(struct lpfc_hba *phba)
|
||||
{
|
||||
struct lpfc_vport *vport = phba->pport;
|
||||
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
||||
|
||||
spin_lock_irq(shost->host_lock);
|
||||
vport->load_flag |= FC_UNLOADING;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
||||
phba->pport->work_port_events = 0;
|
||||
|
||||
/* Stop the SLI4 device port */
|
||||
lpfc_stop_port(phba);
|
||||
|
||||
lpfc_sli4_disable_intr(phba);
|
||||
|
||||
/* Reset SLI4 HBA FCoE function */
|
||||
lpfc_pci_function_reset(phba);
|
||||
lpfc_sli4_queue_destroy(phba);
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
|
||||
* @phba: Pointer to HBA context object.
|
||||
|
@ -9591,7 +9552,6 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|||
struct Scsi_Host *shost = NULL;
|
||||
int error, ret;
|
||||
uint32_t cfg_mode, intr_mode;
|
||||
int mcnt;
|
||||
int adjusted_fcp_io_channel;
|
||||
|
||||
/* Allocate memory for HBA structure */
|
||||
|
@ -9680,57 +9640,34 @@ lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
|
|||
shost = lpfc_shost_from_vport(vport); /* save shost for error cleanup */
|
||||
/* Now, trying to enable interrupt and bring up the device */
|
||||
cfg_mode = phba->cfg_use_msi;
|
||||
while (true) {
|
||||
/* Put device to a known state before enabling interrupt */
|
||||
lpfc_stop_port(phba);
|
||||
/* Configure and enable interrupt */
|
||||
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
|
||||
if (intr_mode == LPFC_INTR_ERROR) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0426 Failed to enable interrupt.\n");
|
||||
error = -ENODEV;
|
||||
goto out_free_sysfs_attr;
|
||||
}
|
||||
/* Default to single EQ for non-MSI-X */
|
||||
if (phba->intr_type != MSIX)
|
||||
adjusted_fcp_io_channel = 1;
|
||||
else
|
||||
adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
|
||||
phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
|
||||
/* Set up SLI-4 HBA */
|
||||
if (lpfc_sli4_hba_setup(phba)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"1421 Failed to set up hba\n");
|
||||
error = -ENODEV;
|
||||
goto out_disable_intr;
|
||||
}
|
||||
|
||||
/* Send NOP mbx cmds for non-INTx mode active interrupt test */
|
||||
if (intr_mode != 0)
|
||||
mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
|
||||
LPFC_ACT_INTR_CNT);
|
||||
|
||||
/* Check active interrupts received only for MSI/MSI-X */
|
||||
if (intr_mode == 0 ||
|
||||
phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
|
||||
/* Log the current active interrupt mode */
|
||||
phba->intr_mode = intr_mode;
|
||||
lpfc_log_intr_mode(phba, intr_mode);
|
||||
break;
|
||||
}
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
"0451 Configure interrupt mode (%d) "
|
||||
"failed active interrupt test.\n",
|
||||
intr_mode);
|
||||
/* Unset the previous SLI-4 HBA setup. */
|
||||
/*
|
||||
* TODO: Is this operation compatible with IF TYPE 2
|
||||
* devices? All port state is deleted and cleared.
|
||||
*/
|
||||
lpfc_sli4_unset_hba(phba);
|
||||
/* Try next level of interrupt mode */
|
||||
cfg_mode = --intr_mode;
|
||||
/* Put device to a known state before enabling interrupt */
|
||||
lpfc_stop_port(phba);
|
||||
/* Configure and enable interrupt */
|
||||
intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
|
||||
if (intr_mode == LPFC_INTR_ERROR) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"0426 Failed to enable interrupt.\n");
|
||||
error = -ENODEV;
|
||||
goto out_free_sysfs_attr;
|
||||
}
|
||||
/* Default to single EQ for non-MSI-X */
|
||||
if (phba->intr_type != MSIX)
|
||||
adjusted_fcp_io_channel = 1;
|
||||
else
|
||||
adjusted_fcp_io_channel = phba->cfg_fcp_io_channel;
|
||||
phba->cfg_fcp_io_channel = adjusted_fcp_io_channel;
|
||||
/* Set up SLI-4 HBA */
|
||||
if (lpfc_sli4_hba_setup(phba)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"1421 Failed to set up hba\n");
|
||||
error = -ENODEV;
|
||||
goto out_disable_intr;
|
||||
}
|
||||
|
||||
/* Log the current active interrupt mode */
|
||||
phba->intr_mode = intr_mode;
|
||||
lpfc_log_intr_mode(phba, intr_mode);
|
||||
|
||||
/* Perform post initialization setup */
|
||||
lpfc_post_init_setup(phba);
|
||||
|
|
|
@ -1115,6 +1115,13 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
|
|||
"0261 Cannot Register NameServer login\n");
|
||||
}
|
||||
|
||||
/*
|
||||
** In case the node reference counter does not go to zero, ensure that
|
||||
** the stale state for the node is not processed.
|
||||
*/
|
||||
|
||||
ndlp->nlp_prev_state = ndlp->nlp_state;
|
||||
lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE);
|
||||
spin_lock_irq(shost->host_lock);
|
||||
ndlp->nlp_flag |= NLP_DEFER_RM;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
|
@ -2159,13 +2166,16 @@ lpfc_cmpl_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
|
|||
{
|
||||
struct lpfc_iocbq *cmdiocb, *rspiocb;
|
||||
IOCB_t *irsp;
|
||||
struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
|
||||
|
||||
cmdiocb = (struct lpfc_iocbq *) arg;
|
||||
rspiocb = cmdiocb->context_un.rsp_iocb;
|
||||
|
||||
irsp = &rspiocb->iocb;
|
||||
if (irsp->ulpStatus) {
|
||||
spin_lock_irq(shost->host_lock);
|
||||
ndlp->nlp_flag |= NLP_DEFER_RM;
|
||||
spin_unlock_irq(shost->host_lock);
|
||||
return NLP_STE_FREED_NODE;
|
||||
}
|
||||
return ndlp->nlp_state;
|
||||
|
|
|
@ -287,6 +287,26 @@ lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
|
|||
return sdev->queue_depth;
|
||||
}
|
||||
|
||||
/**
* lpfc_change_queue_type() - Change a device's scsi tag queuing type
* @sdev: Pointer to the scsi device whose queue type is to change
* @tag_type: Identifier for queue tag type
*/
static int
|
||||
lpfc_change_queue_type(struct scsi_device *sdev, int tag_type)
|
||||
{
|
||||
if (sdev->tagged_supported) {
|
||||
scsi_set_tag_type(sdev, tag_type);
|
||||
if (tag_type)
|
||||
scsi_activate_tcq(sdev, sdev->queue_depth);
|
||||
else
|
||||
scsi_deactivate_tcq(sdev, sdev->queue_depth);
|
||||
} else
|
||||
tag_type = 0;
|
||||
|
||||
return tag_type;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_rampdown_queue_depth - Post RAMP_DOWN_QUEUE event to worker thread
|
||||
* @phba: The Hba for which this call is being executed.
|
||||
|
@ -3972,7 +3992,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
|
|||
break;
|
||||
}
|
||||
} else
|
||||
fcp_cmnd->fcpCntl1 = 0;
|
||||
fcp_cmnd->fcpCntl1 = SIMPLE_Q;
|
||||
|
||||
sli4 = (phba->sli_rev == LPFC_SLI_REV4);
|
||||
|
||||
|
@@ -5150,6 +5170,7 @@ struct scsi_host_template lpfc_template = {
.max_sectors = 0xFFFF,
.vendor_id = LPFC_NL_VENDOR_ID,
.change_queue_depth = lpfc_change_queue_depth,
.change_queue_type = lpfc_change_queue_type,
};

struct scsi_host_template lpfc_vport_template = {

@@ -5172,4 +5193,5 @@ struct scsi_host_template lpfc_vport_template = {
.shost_attrs = lpfc_vport_attrs,
.max_sectors = 0xFFFF,
.change_queue_depth = lpfc_change_queue_depth,
.change_queue_type = lpfc_change_queue_type,
};
@ -124,10 +124,17 @@ lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
|
|||
|
||||
/* Ring Doorbell */
|
||||
doorbell.word0 = 0;
|
||||
bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
|
||||
bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
|
||||
bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
|
||||
writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
|
||||
if (q->db_format == LPFC_DB_LIST_FORMAT) {
|
||||
bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
|
||||
bf_set(lpfc_wq_db_list_fm_index, &doorbell, host_index);
|
||||
bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);
|
||||
} else if (q->db_format == LPFC_DB_RING_FORMAT) {
|
||||
bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
|
||||
bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
writel(doorbell.word0, q->db_regaddr);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -456,10 +463,20 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
|
|||
/* Ring The Header Receive Queue Doorbell */
|
||||
if (!(hq->host_index % hq->entry_repost)) {
|
||||
doorbell.word0 = 0;
|
||||
bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
|
||||
hq->entry_repost);
|
||||
bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
|
||||
writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
|
||||
if (hq->db_format == LPFC_DB_RING_FORMAT) {
|
||||
bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
|
||||
hq->entry_repost);
|
||||
bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
|
||||
} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
|
||||
bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
|
||||
hq->entry_repost);
|
||||
bf_set(lpfc_rq_db_list_fm_index, &doorbell,
|
||||
hq->host_index);
|
||||
bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
|
||||
} else {
|
||||
return -EINVAL;
|
||||
}
|
||||
writel(doorbell.word0, hq->db_regaddr);
|
||||
}
|
||||
return put_index;
|
||||
}
|
||||
|
@ -4939,7 +4956,7 @@ lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
|
|||
static void
|
||||
lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
|
||||
{
|
||||
uint8_t fcp_eqidx;
|
||||
int fcp_eqidx;
|
||||
|
||||
lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
|
||||
lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
|
||||
|
@ -5622,6 +5639,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
|
|||
}
|
||||
/* RPIs. */
|
||||
count = phba->sli4_hba.max_cfg_param.max_rpi;
|
||||
if (count <= 0) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"3279 Invalid provisioning of "
|
||||
"rpi:%d\n", count);
|
||||
rc = -EINVAL;
|
||||
goto err_exit;
|
||||
}
|
||||
base = phba->sli4_hba.max_cfg_param.rpi_base;
|
||||
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
|
||||
phba->sli4_hba.rpi_bmask = kzalloc(longs *
|
||||
|
@ -5644,6 +5668,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
|
|||
|
||||
/* VPIs. */
|
||||
count = phba->sli4_hba.max_cfg_param.max_vpi;
|
||||
if (count <= 0) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"3280 Invalid provisioning of "
|
||||
"vpi:%d\n", count);
|
||||
rc = -EINVAL;
|
||||
goto free_rpi_ids;
|
||||
}
|
||||
base = phba->sli4_hba.max_cfg_param.vpi_base;
|
||||
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
|
||||
phba->vpi_bmask = kzalloc(longs *
|
||||
|
@ -5666,6 +5697,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
|
|||
|
||||
/* XRIs. */
|
||||
count = phba->sli4_hba.max_cfg_param.max_xri;
|
||||
if (count <= 0) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"3281 Invalid provisioning of "
|
||||
"xri:%d\n", count);
|
||||
rc = -EINVAL;
|
||||
goto free_vpi_ids;
|
||||
}
|
||||
base = phba->sli4_hba.max_cfg_param.xri_base;
|
||||
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
|
||||
phba->sli4_hba.xri_bmask = kzalloc(longs *
|
||||
|
@ -5689,6 +5727,13 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
|
|||
|
||||
/* VFIs. */
|
||||
count = phba->sli4_hba.max_cfg_param.max_vfi;
|
||||
if (count <= 0) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
|
||||
"3282 Invalid provisioning of "
|
||||
"vfi:%d\n", count);
|
||||
rc = -EINVAL;
|
||||
goto free_xri_ids;
|
||||
}
|
||||
base = phba->sli4_hba.max_cfg_param.vfi_base;
|
||||
longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
|
||||
phba->sli4_hba.vfi_bmask = kzalloc(longs *
|
||||
|
@ -8370,7 +8415,7 @@ __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
|
|||
* This is a continuation of a commandi,(CX) so this
|
||||
* sglq is on the active list
|
||||
*/
|
||||
sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
|
||||
sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
|
||||
if (!sglq)
|
||||
return IOCB_ERROR;
|
||||
}
|
||||
|
@ -8855,12 +8900,6 @@ lpfc_sli_setup(struct lpfc_hba *phba)
|
|||
pring->prt[3].type = FC_TYPE_CT;
|
||||
pring->prt[3].lpfc_sli_rcv_unsol_event =
|
||||
lpfc_ct_unsol_event;
|
||||
/* abort unsolicited sequence */
|
||||
pring->prt[4].profile = 0; /* Mask 4 */
|
||||
pring->prt[4].rctl = FC_RCTL_BA_ABTS;
|
||||
pring->prt[4].type = FC_TYPE_BLS;
|
||||
pring->prt[4].lpfc_sli_rcv_unsol_event =
|
||||
lpfc_sli4_ct_abort_unsol_event;
|
||||
break;
|
||||
}
|
||||
totiocbsize += (pring->sli.sli3.numCiocb *
|
||||
|
@ -11873,7 +11912,7 @@ lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
|
|||
struct lpfc_eqe *eqe;
|
||||
unsigned long iflag;
|
||||
int ecount = 0;
|
||||
uint32_t fcp_eqidx;
|
||||
int fcp_eqidx;
|
||||
|
||||
/* Get the driver's phba structure from the dev_id */
|
||||
fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
|
||||
|
@ -11975,7 +12014,7 @@ lpfc_sli4_intr_handler(int irq, void *dev_id)
|
|||
struct lpfc_hba *phba;
|
||||
irqreturn_t hba_irq_rc;
|
||||
bool hba_handled = false;
|
||||
uint32_t fcp_eqidx;
|
||||
int fcp_eqidx;
|
||||
|
||||
/* Get the driver's phba structure from the dev_id */
|
||||
phba = (struct lpfc_hba *)dev_id;
|
||||
|
@ -12096,6 +12135,54 @@ lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/**
* lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
* @phba: HBA structure that indicates port to create a queue on.
* @pci_barset: PCI BAR set flag.
*
* This function shall perform iomap of the specified PCI BAR address to host
* memory address if not already done so and return it. The returned host
* memory address can be NULL.
*/
static void __iomem *
lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
{
|
||||
struct pci_dev *pdev;
|
||||
unsigned long bar_map, bar_map_len;
|
||||
|
||||
if (!phba->pcidev)
|
||||
return NULL;
|
||||
else
|
||||
pdev = phba->pcidev;
|
||||
|
||||
switch (pci_barset) {
|
||||
case WQ_PCI_BAR_0_AND_1:
|
||||
if (!phba->pci_bar0_memmap_p) {
|
||||
bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0);
|
||||
bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0);
|
||||
phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len);
|
||||
}
|
||||
return phba->pci_bar0_memmap_p;
|
||||
case WQ_PCI_BAR_2_AND_3:
|
||||
if (!phba->pci_bar2_memmap_p) {
|
||||
bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2);
|
||||
bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2);
|
||||
phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len);
|
||||
}
|
||||
return phba->pci_bar2_memmap_p;
|
||||
case WQ_PCI_BAR_4_AND_5:
|
||||
if (!phba->pci_bar4_memmap_p) {
|
||||
bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4);
|
||||
bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4);
|
||||
phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len);
|
||||
}
|
||||
return phba->pci_bar4_memmap_p;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_modify_fcp_eq_delay - Modify Delay Multiplier on FCP EQs
|
||||
* @phba: HBA structure that indicates port to create a queue on.
|
||||
|
@ -12673,6 +12760,9 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
|
|||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
|
||||
struct dma_address *page;
|
||||
void __iomem *bar_memmap_p;
|
||||
uint32_t db_offset;
|
||||
uint16_t pci_barset;
|
||||
|
||||
/* sanity check on queue memory */
|
||||
if (!wq || !cq)
|
||||
|
@ -12696,6 +12786,7 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
|
|||
cq->queue_id);
|
||||
bf_set(lpfc_mbox_hdr_version, &shdr->request,
|
||||
phba->sli4_hba.pc_sli4_params.wqv);
|
||||
|
||||
if (phba->sli4_hba.pc_sli4_params.wqv == LPFC_Q_CREATE_VERSION_1) {
|
||||
bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
|
||||
wq->entry_count);
|
||||
|
@ -12723,6 +12814,10 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
|
|||
page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
|
||||
page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
|
||||
}
|
||||
|
||||
if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
|
||||
bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
|
||||
|
||||
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
|
||||
/* The IOCTL status is embedded in the mailbox subheader. */
|
||||
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
||||
|
@ -12740,6 +12835,47 @@ lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
|
|||
status = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
|
||||
wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
|
||||
&wq_create->u.response);
|
||||
if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
|
||||
(wq->db_format != LPFC_DB_RING_FORMAT)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3265 WQ[%d] doorbell format not "
|
||||
"supported: x%x\n", wq->queue_id,
|
||||
wq->db_format);
|
||||
status = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
|
||||
&wq_create->u.response);
|
||||
bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
|
||||
if (!bar_memmap_p) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3263 WQ[%d] failed to memmap pci "
|
||||
"barset:x%x\n", wq->queue_id,
|
||||
pci_barset);
|
||||
status = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
db_offset = wq_create->u.response.doorbell_offset;
|
||||
if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
|
||||
(db_offset != LPFC_ULP1_WQ_DOORBELL)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3252 WQ[%d] doorbell offset not "
|
||||
"supported: x%x\n", wq->queue_id,
|
||||
db_offset);
|
||||
status = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
wq->db_regaddr = bar_memmap_p + db_offset;
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
"3264 WQ[%d]: barset:x%x, offset:x%x\n",
|
||||
wq->queue_id, pci_barset, db_offset);
|
||||
} else {
|
||||
wq->db_format = LPFC_DB_LIST_FORMAT;
|
||||
wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
|
||||
}
|
||||
wq->type = LPFC_WQ;
|
||||
wq->assoc_qid = cq->queue_id;
|
||||
wq->subtype = subtype;
|
||||
|
@ -12816,6 +12952,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|||
uint32_t shdr_status, shdr_add_status;
|
||||
union lpfc_sli4_cfg_shdr *shdr;
|
||||
uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
|
||||
void __iomem *bar_memmap_p;
|
||||
uint32_t db_offset;
|
||||
uint16_t pci_barset;
|
||||
|
||||
/* sanity check on queue memory */
|
||||
if (!hrq || !drq || !cq)
|
||||
|
@ -12894,6 +13033,9 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|||
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
|
||||
putPaddrHigh(dmabuf->phys);
|
||||
}
|
||||
if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
|
||||
bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
|
||||
|
||||
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
|
||||
/* The IOCTL status is embedded in the mailbox subheader. */
|
||||
shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
|
||||
|
@ -12911,6 +13053,50 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|||
status = -ENXIO;
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
|
||||
hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
|
||||
&rq_create->u.response);
|
||||
if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
|
||||
(hrq->db_format != LPFC_DB_RING_FORMAT)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3262 RQ [%d] doorbell format not "
|
||||
"supported: x%x\n", hrq->queue_id,
|
||||
hrq->db_format);
|
||||
status = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
|
||||
&rq_create->u.response);
|
||||
bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
|
||||
if (!bar_memmap_p) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3269 RQ[%d] failed to memmap pci "
|
||||
"barset:x%x\n", hrq->queue_id,
|
||||
pci_barset);
|
||||
status = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
db_offset = rq_create->u.response.doorbell_offset;
|
||||
if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
|
||||
(db_offset != LPFC_ULP1_RQ_DOORBELL)) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
|
||||
"3270 RQ[%d] doorbell offset not "
|
||||
"supported: x%x\n", hrq->queue_id,
|
||||
db_offset);
|
||||
status = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
hrq->db_regaddr = bar_memmap_p + db_offset;
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
|
||||
"3266 RQ[qid:%d]: barset:x%x, offset:x%x\n",
|
||||
hrq->queue_id, pci_barset, db_offset);
|
||||
} else {
|
||||
hrq->db_format = LPFC_DB_RING_FORMAT;
|
||||
hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
|
||||
}
|
||||
hrq->type = LPFC_HRQ;
|
||||
hrq->assoc_qid = cq->queue_id;
|
||||
hrq->subtype = subtype;
|
||||
|
@ -12976,6 +13162,8 @@ lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
|
|||
rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
|
||||
putPaddrHigh(dmabuf->phys);
|
||||
}
|
||||
if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
|
||||
bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
|
||||
rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
|
||||
/* The IOCTL status is embedded in the mailbox subheader. */
|
||||
shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
|
||||
|
@ -14062,6 +14250,40 @@ lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
|
|||
return false;
|
||||
}
|
||||
|
||||
/**
* lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
* @vport: pointer to a virtual port
* @dmabuf: pointer to a dmabuf that describes the FC sequence
*
* This function tries to abort the assembled sequence at the upper level
* protocol, described by the information from the basic abort @dmabuf. It
* checks to see whether such a pending context exists at the upper level
* protocol. If so, it shall clean up the pending context.
*
* Return
* true -- if there is a matching pending context of the sequence cleaned
* at ulp;
* false -- if there is no matching pending context of the sequence present
* at ulp.
**/
static bool
|
||||
lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
int handled;
|
||||
|
||||
/* Accepting abort at ulp with SLI4 only */
|
||||
if (phba->sli_rev < LPFC_SLI_REV4)
|
||||
return false;
|
||||
|
||||
/* Register all caring upper level protocols to attend abort */
|
||||
handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
|
||||
if (handled)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/**
|
||||
* lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
|
||||
* @phba: Pointer to HBA context object.
|
||||
|
@ -14077,8 +14299,14 @@ lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
|
|||
struct lpfc_iocbq *cmd_iocbq,
|
||||
struct lpfc_iocbq *rsp_iocbq)
|
||||
{
|
||||
if (cmd_iocbq)
|
||||
struct lpfc_nodelist *ndlp;
|
||||
|
||||
if (cmd_iocbq) {
|
||||
ndlp = (struct lpfc_nodelist *)cmd_iocbq->context1;
|
||||
lpfc_nlp_put(ndlp);
|
||||
lpfc_nlp_not_used(ndlp);
|
||||
lpfc_sli_release_iocbq(phba, cmd_iocbq);
|
||||
}
|
||||
|
||||
/* Failure means BLS ABORT RSP did not get delivered to remote node*/
|
||||
if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
|
||||
|
@ -14118,9 +14346,10 @@ lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
|
|||
* event after aborting the sequence handling.
|
||||
**/
|
||||
static void
|
||||
lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
|
||||
struct fc_frame_header *fc_hdr)
|
||||
lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
|
||||
struct fc_frame_header *fc_hdr, bool aborted)
|
||||
{
|
||||
struct lpfc_hba *phba = vport->phba;
|
||||
struct lpfc_iocbq *ctiocb = NULL;
|
||||
struct lpfc_nodelist *ndlp;
|
||||
uint16_t oxid, rxid, xri, lxri;
|
||||
|
@ -14135,12 +14364,27 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
|
|||
oxid = be16_to_cpu(fc_hdr->fh_ox_id);
|
||||
rxid = be16_to_cpu(fc_hdr->fh_rx_id);
|
||||
|
||||
ndlp = lpfc_findnode_did(phba->pport, sid);
|
||||
ndlp = lpfc_findnode_did(vport, sid);
|
||||
if (!ndlp) {
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
|
||||
"1268 Find ndlp returned NULL for oxid:x%x "
|
||||
"SID:x%x\n", oxid, sid);
|
||||
return;
|
||||
ndlp = mempool_alloc(phba->nlp_mem_pool, GFP_KERNEL);
|
||||
if (!ndlp) {
|
||||
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
|
||||
"1268 Failed to allocate ndlp for "
|
||||
"oxid:x%x SID:x%x\n", oxid, sid);
|
||||
return;
|
||||
}
|
||||
lpfc_nlp_init(vport, ndlp, sid);
|
||||
/* Put ndlp onto pport node list */
|
||||
lpfc_enqueue_node(vport, ndlp);
|
||||
} else if (!NLP_CHK_NODE_ACT(ndlp)) {
|
||||
/* re-setup ndlp without removing from node list */
|
||||
ndlp = lpfc_enable_node(vport, ndlp, NLP_STE_UNUSED_NODE);
|
||||
if (!ndlp) {
|
||||
lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
|
||||
"3275 Failed to active ndlp found "
|
||||
"for oxid:x%x SID:x%x\n", oxid, sid);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* Allocate buffer for rsp iocb */
|
||||
|
@ -14164,7 +14408,7 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
|
|||
icmd->ulpLe = 1;
|
||||
icmd->ulpClass = CLASS3;
|
||||
icmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
|
||||
ctiocb->context1 = ndlp;
|
||||
ctiocb->context1 = lpfc_nlp_get(ndlp);
|
||||
|
||||
ctiocb->iocb_cmpl = NULL;
|
||||
ctiocb->vport = phba->pport;
|
||||
|
@ -14183,14 +14427,24 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
|
|||
if (lxri != NO_XRI)
|
||||
lpfc_set_rrq_active(phba, ndlp, lxri,
|
||||
(xri == oxid) ? rxid : oxid, 0);
|
||||
/* If the oxid maps to the FCP XRI range or if it is out of range,
|
||||
* send a BLS_RJT. The driver no longer has that exchange.
|
||||
* Override the IOCB for a BA_RJT.
|
||||
/* For BA_ABTS from exchange responder, if the logical xri with
|
||||
* the oxid maps to the FCP XRI range, the port no longer has
|
||||
* that exchange context, send a BLS_RJT. Override the IOCB for
|
||||
* a BA_RJT.
|
||||
*/
|
||||
if (xri > (phba->sli4_hba.max_cfg_param.max_xri +
|
||||
phba->sli4_hba.max_cfg_param.xri_base) ||
|
||||
xri > (lpfc_sli4_get_els_iocb_cnt(phba) +
|
||||
phba->sli4_hba.max_cfg_param.xri_base)) {
|
||||
if ((fctl & FC_FC_EX_CTX) &&
|
||||
(lxri > lpfc_sli4_get_els_iocb_cnt(phba))) {
|
||||
icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
|
||||
bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
|
||||
bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
|
||||
bf_set(lpfc_rsn_code, &icmd->un.bls_rsp, FC_BA_RJT_UNABLE);
|
||||
}
|
||||
|
||||
/* If BA_ABTS failed to abort a partially assembled receive sequence,
|
||||
* the driver no longer has that exchange, send a BLS_RJT. Override
|
||||
* the IOCB for a BA_RJT.
|
||||
*/
|
||||
if (aborted == false) {
|
||||
icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_BA_RJT;
|
||||
bf_set(lpfc_vndr_code, &icmd->un.bls_rsp, 0);
|
||||
bf_set(lpfc_rsn_expln, &icmd->un.bls_rsp, FC_BA_RJT_INV_XID);
|
||||
|
@ -14214,17 +14468,19 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_hba *phba,
|
|||
bf_set(lpfc_abts_oxid, &icmd->un.bls_rsp, oxid);
|
||||
|
||||
/* Xmit CT abts response on exchange <xid> */
|
||||
lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
|
||||
"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
|
||||
icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
|
||||
lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
|
||||
"1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
|
||||
icmd->un.xseq64.w5.hcsw.Rctl, oxid, phba->link_state);
|
||||
|
||||
rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
|
||||
if (rc == IOCB_ERROR) {
|
||||
lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
|
||||
"2925 Failed to issue CT ABTS RSP x%x on "
|
||||
"xri x%x, Data x%x\n",
|
||||
icmd->un.xseq64.w5.hcsw.Rctl, oxid,
|
||||
phba->link_state);
|
||||
lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
|
||||
"2925 Failed to issue CT ABTS RSP x%x on "
|
||||
"xri x%x, Data x%x\n",
|
||||
icmd->un.xseq64.w5.hcsw.Rctl, oxid,
|
||||
phba->link_state);
|
||||
lpfc_nlp_put(ndlp);
|
||||
ctiocb->context1 = NULL;
|
||||
lpfc_sli_release_iocbq(phba, ctiocb);
|
||||
}
|
||||
}
|
||||
|
@ -14249,32 +14505,25 @@ lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
|
|||
struct lpfc_hba *phba = vport->phba;
|
||||
struct fc_frame_header fc_hdr;
|
||||
uint32_t fctl;
|
||||
bool abts_par;
|
||||
bool aborted;
|
||||
|
||||
/* Make a copy of fc_hdr before the dmabuf being released */
|
||||
memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
|
||||
fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
|
||||
|
||||
if (fctl & FC_FC_EX_CTX) {
|
||||
/*
|
||||
* ABTS sent by responder to exchange, just free the buffer
|
||||
*/
|
||||
lpfc_in_buf_free(phba, &dmabuf->dbuf);
|
||||
/* ABTS by responder to exchange, no cleanup needed */
|
||||
aborted = true;
|
||||
} else {
|
||||
/*
|
||||
* ABTS sent by initiator to exchange, need to do cleanup
|
||||
*/
|
||||
/* Try to abort partially assembled seq */
|
||||
abts_par = lpfc_sli4_abort_partial_seq(vport, dmabuf);
|
||||
|
||||
/* Send abort to ULP if partially seq abort failed */
|
||||
if (abts_par == false)
|
||||
lpfc_sli4_send_seq_to_ulp(vport, dmabuf);
|
||||
else
|
||||
lpfc_in_buf_free(phba, &dmabuf->dbuf);
|
||||
/* ABTS by initiator to exchange, need to do cleanup */
|
||||
aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
|
||||
if (aborted == false)
|
||||
aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
|
||||
}
|
||||
/* Send basic accept (BA_ACC) to the abort requester */
|
||||
lpfc_sli4_seq_abort_rsp(phba, &fc_hdr);
|
||||
lpfc_in_buf_free(phba, &dmabuf->dbuf);
|
||||
|
||||
/* Respond with BA_ACC or BA_RJT accordingly */
|
||||
lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -15307,10 +15556,13 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
|
|||
{
|
||||
uint16_t next_fcf_index;
|
||||
|
||||
initial_priority:
|
||||
/* Search start from next bit of currently registered FCF index */
|
||||
next_fcf_index = phba->fcf.current_rec.fcf_indx;
|
||||
|
||||
next_priority:
|
||||
next_fcf_index = (phba->fcf.current_rec.fcf_indx + 1) %
|
||||
LPFC_SLI4_FCF_TBL_INDX_MAX;
|
||||
/* Determine the next fcf index to check */
|
||||
next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
|
||||
next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
|
||||
LPFC_SLI4_FCF_TBL_INDX_MAX,
|
||||
next_fcf_index);
|
||||
|
@ -15337,7 +15589,7 @@ lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
|
|||
* at that level and continue the selection process.
|
||||
*/
|
||||
if (lpfc_check_next_fcf_pri_level(phba))
|
||||
goto next_priority;
|
||||
goto initial_priority;
|
||||
lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
|
||||
"2844 No roundrobin failover FCF available\n");
|
||||
if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
|
||||
|
|
|
@ -139,6 +139,10 @@ struct lpfc_queue {
|
|||
|
||||
struct lpfc_sli_ring *pring; /* ptr to io ring associated with q */
|
||||
|
||||
uint16_t db_format;
|
||||
#define LPFC_DB_RING_FORMAT 0x01
|
||||
#define LPFC_DB_LIST_FORMAT 0x02
|
||||
void __iomem *db_regaddr;
|
||||
/* For q stats */
|
||||
uint32_t q_cnt_1;
|
||||
uint32_t q_cnt_2;
|
||||
|
@ -508,6 +512,10 @@ struct lpfc_sli4_hba {
|
|||
struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
|
||||
struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
|
||||
|
||||
uint8_t fw_func_mode; /* FW function protocol mode */
|
||||
uint32_t ulp0_mode; /* ULP0 protocol mode */
|
||||
uint32_t ulp1_mode; /* ULP1 protocol mode */
|
||||
|
||||
/* Setup information for various queue parameters */
|
||||
int eq_esize;
|
||||
int eq_ecount;
|
||||
|
|
|
@@ -18,7 +18,7 @@
* included with this package. *
*******************************************************************/

#define LPFC_DRIVER_VERSION "8.3.36"
#define LPFC_DRIVER_VERSION "8.3.37"
#define LPFC_DRIVER_NAME "lpfc"

/* Used for SLI 2/3 */
@ -155,7 +155,7 @@ _base_fault_reset_work(struct work_struct *work)
|
|||
struct task_struct *p;
|
||||
|
||||
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
|
||||
if (ioc->shost_recovery)
|
||||
if (ioc->shost_recovery || ioc->pci_error_recovery)
|
||||
goto rearm_timer;
|
||||
spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
|
||||
|
||||
|
@ -164,6 +164,20 @@ _base_fault_reset_work(struct work_struct *work)
|
|||
printk(MPT2SAS_INFO_FMT "%s : SAS host is non-operational !!!!\n",
ioc->name, __func__);

/* It may be possible that EEH recovery can resolve some of
* the pci bus failure issues rather than removing the dead ioc
* function by considering the controller to be in a non-operational
* state. So here priority is given to the EEH recovery. If it does
* not resolve this issue, the mpt2sas driver will consider this
* controller to be non-operational and remove the dead ioc function.
*/
if (ioc->non_operational_loop++ < 5) {
spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
flags);
goto rearm_timer;
}

/*
* Call _scsih_flush_pending_cmds callback so that we flush all
* pending commands back to OS. This call is required to avoid
@ -193,6 +207,8 @@ _base_fault_reset_work(struct work_struct *work)
|
|||
return; /* don't rearm timer */
|
||||
}
|
||||
|
||||
ioc->non_operational_loop = 0;
|
||||
|
||||
if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
|
||||
rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
|
||||
FORCE_BIG_HAMMER);
|
||||
|
@ -4386,6 +4402,7 @@ mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
|
|||
if (missing_delay[0] != -1 && missing_delay[1] != -1)
|
||||
_base_update_missing_delay(ioc, missing_delay[0],
|
||||
missing_delay[1]);
|
||||
ioc->non_operational_loop = 0;
|
||||
|
||||
return 0;
|
||||
|
||||
|
|
|
@ -835,6 +835,7 @@ struct MPT2SAS_ADAPTER {
|
|||
u16 cpu_msix_table_sz;
|
||||
u32 ioc_reset_count;
|
||||
MPT2SAS_FLUSH_RUNNING_CMDS schedule_dead_ioc_flush_running_cmds;
|
||||
u32 non_operational_loop;
|
||||
|
||||
/* internal commands, callback index */
|
||||
u8 scsi_io_cb_idx;
|
||||
|
|
|
@ -42,7 +42,6 @@
|
|||
* USA.
|
||||
*/
|
||||
|
||||
#include <linux/version.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/errno.h>
|
||||
|
@ -1310,7 +1309,6 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
|
|||
void *sg_local, *chain;
|
||||
u32 chain_offset;
|
||||
u32 chain_length;
|
||||
u32 chain_flags;
|
||||
int sges_left;
|
||||
u32 sges_in_segment;
|
||||
u8 simple_sgl_flags;
|
||||
|
@ -1356,8 +1354,7 @@ _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
|
|||
sges_in_segment--;
|
||||
}
|
||||
|
||||
/* initializing the chain flags and pointers */
|
||||
chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
|
||||
/* initializing the pointers */
|
||||
chain_req = _base_get_chain_buffer_tracker(ioc, smid);
|
||||
if (!chain_req)
|
||||
return -1;
|
||||
|
|
|
@ -41,7 +41,6 @@
|
|||
* USA.
|
||||
*/
|
||||
|
||||
#include <linux/version.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
|
|
|
@ -42,7 +42,6 @@
|
|||
* USA.
|
||||
*/
|
||||
|
||||
#include <linux/version.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/errno.h>
|
||||
|
@ -3136,7 +3135,7 @@ _ctl_diag_trigger_mpi_store(struct device *cdev,
|
|||
spin_lock_irqsave(&ioc->diag_trigger_lock, flags);
|
||||
sz = min(sizeof(struct SL_WH_MPI_TRIGGERS_T), count);
|
||||
memset(&ioc->diag_trigger_mpi, 0,
|
||||
sizeof(struct SL_WH_EVENT_TRIGGERS_T));
|
||||
sizeof(ioc->diag_trigger_mpi));
|
||||
memcpy(&ioc->diag_trigger_mpi, buf, sz);
|
||||
if (ioc->diag_trigger_mpi.ValidEntries > NUM_VALID_ENTRIES)
|
||||
ioc->diag_trigger_mpi.ValidEntries = NUM_VALID_ENTRIES;
|
||||
|
|
|
@ -41,7 +41,6 @@
|
|||
* USA.
|
||||
*/
|
||||
|
||||
#include <linux/version.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
|
@ -2755,13 +2754,11 @@ _scsih_block_io_to_children_attached_directly(struct MPT3SAS_ADAPTER *ioc,
|
|||
int i;
|
||||
u16 handle;
|
||||
u16 reason_code;
|
||||
u8 phy_number;
|
||||
|
||||
for (i = 0; i < event_data->NumEntries; i++) {
|
||||
handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
|
||||
if (!handle)
|
||||
continue;
|
||||
phy_number = event_data->StartPhyNum + i;
|
||||
reason_code = event_data->PHY[i].PhyStatus &
|
||||
MPI2_EVENT_SAS_TOPO_RC_MASK;
|
||||
if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
|
||||
|
|
|
@ -42,7 +42,6 @@
|
|||
* USA.
|
||||
*/
|
||||
|
||||
#include <linux/version.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/errno.h>
|
||||
|
|
|
@ -538,7 +538,7 @@ struct device_info {
|
|||
int port_num;
|
||||
};
|
||||
|
||||
static int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
|
||||
int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha)
|
||||
{
|
||||
uint32_t drv_active;
|
||||
uint32_t dev_part, dev_part1, dev_part2;
|
||||
|
@ -1351,31 +1351,58 @@ int qla4_83xx_start_firmware(struct scsi_qla_host *ha)
|
|||
|
||||
/*----------------------Interrupt Related functions ---------------------*/
|
||||
|
||||
void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
|
||||
static void qla4_83xx_disable_iocb_intrs(struct scsi_qla_host *ha)
|
||||
{
|
||||
if (test_and_clear_bit(AF_83XX_IOCB_INTR_ON, &ha->flags))
|
||||
qla4_8xxx_intr_disable(ha);
|
||||
}
|
||||
|
||||
static void qla4_83xx_disable_mbox_intrs(struct scsi_qla_host *ha)
|
||||
{
|
||||
uint32_t mb_int, ret;
|
||||
|
||||
if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
|
||||
qla4_8xxx_mbx_intr_disable(ha);
|
||||
|
||||
ret = readl(&ha->qla4_83xx_reg->mbox_int);
|
||||
mb_int = ret & ~INT_ENABLE_FW_MB;
|
||||
writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
|
||||
writel(1, &ha->qla4_83xx_reg->leg_int_mask);
|
||||
if (test_and_clear_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
|
||||
ret = readl(&ha->qla4_83xx_reg->mbox_int);
|
||||
mb_int = ret & ~INT_ENABLE_FW_MB;
|
||||
writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
|
||||
writel(1, &ha->qla4_83xx_reg->leg_int_mask);
|
||||
}
|
||||
}
|
||||
|
||||
void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
|
||||
void qla4_83xx_disable_intrs(struct scsi_qla_host *ha)
|
||||
{
|
||||
qla4_83xx_disable_mbox_intrs(ha);
|
||||
qla4_83xx_disable_iocb_intrs(ha);
|
||||
}
|
||||
|
||||
static void qla4_83xx_enable_iocb_intrs(struct scsi_qla_host *ha)
|
||||
{
|
||||
if (!test_bit(AF_83XX_IOCB_INTR_ON, &ha->flags)) {
|
||||
qla4_8xxx_intr_enable(ha);
|
||||
set_bit(AF_83XX_IOCB_INTR_ON, &ha->flags);
|
||||
}
|
||||
}
|
||||
|
||||
void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha)
|
||||
{
|
||||
uint32_t mb_int;
|
||||
|
||||
qla4_8xxx_mbx_intr_enable(ha);
|
||||
mb_int = INT_ENABLE_FW_MB;
|
||||
writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
|
||||
writel(0, &ha->qla4_83xx_reg->leg_int_mask);
|
||||
|
||||
set_bit(AF_INTERRUPTS_ON, &ha->flags);
|
||||
if (!test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags)) {
|
||||
mb_int = INT_ENABLE_FW_MB;
|
||||
writel(mb_int, &ha->qla4_83xx_reg->mbox_int);
|
||||
writel(0, &ha->qla4_83xx_reg->leg_int_mask);
|
||||
set_bit(AF_83XX_MBOX_INTR_ON, &ha->flags);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void qla4_83xx_enable_intrs(struct scsi_qla_host *ha)
|
||||
{
|
||||
qla4_83xx_enable_mbox_intrs(ha);
|
||||
qla4_83xx_enable_iocb_intrs(ha);
|
||||
}
|
||||
|
||||
|
||||
void qla4_83xx_queue_mbox_cmd(struct scsi_qla_host *ha, uint32_t *mbx_cmd,
|
||||
int incount)
|
||||
{
|
||||
|
|
|
@ -74,16 +74,22 @@ qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
|
|||
}
|
||||
break;
|
||||
case 2:
|
||||
/* Reset HBA */
|
||||
/* Reset HBA and collect FW dump */
|
||||
ha->isp_ops->idc_lock(ha);
|
||||
dev_state = qla4_8xxx_rd_direct(ha, QLA8XXX_CRB_DEV_STATE);
|
||||
if (dev_state == QLA8XXX_DEV_READY) {
|
||||
ql4_printk(KERN_INFO, ha,
|
||||
"%s: Setting Need reset, reset_owner is 0x%x.\n",
|
||||
__func__, ha->func_num);
|
||||
ql4_printk(KERN_INFO, ha, "%s: Setting Need reset\n",
|
||||
__func__);
|
||||
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DEV_STATE,
|
||||
QLA8XXX_DEV_NEED_RESET);
|
||||
set_bit(AF_8XXX_RST_OWNER, &ha->flags);
|
||||
if (is_qla8022(ha) ||
|
||||
(is_qla8032(ha) &&
|
||||
qla4_83xx_can_perform_reset(ha))) {
|
||||
set_bit(AF_8XXX_RST_OWNER, &ha->flags);
|
||||
set_bit(AF_FW_RECOVERY, &ha->flags);
|
||||
ql4_printk(KERN_INFO, ha, "%s: Reset owner is 0x%x\n",
|
||||
__func__, ha->func_num);
|
||||
}
|
||||
} else
|
||||
ql4_printk(KERN_INFO, ha,
|
||||
"%s: Reset not performed as device state is 0x%x\n",
|
||||
|
|
|
@ -136,6 +136,7 @@
|
|||
#define RESPONSE_QUEUE_DEPTH 64
|
||||
#define QUEUE_SIZE 64
|
||||
#define DMA_BUFFER_SIZE 512
|
||||
#define IOCB_HIWAT_CUSHION 4
|
||||
|
||||
/*
|
||||
* Misc
|
||||
|
@ -180,6 +181,7 @@
|
|||
#define DISABLE_ACB_TOV 30
|
||||
#define IP_CONFIG_TOV 30
|
||||
#define LOGIN_TOV 12
|
||||
#define BOOT_LOGIN_RESP_TOV 60
|
||||
|
||||
#define MAX_RESET_HA_RETRIES 2
|
||||
#define FW_ALIVE_WAIT_TOV 3
|
||||
|
@ -314,6 +316,7 @@ struct ql4_tuple_ddb {
|
|||
* DDB flags.
|
||||
*/
|
||||
#define DF_RELOGIN 0 /* Relogin to device */
|
||||
#define DF_BOOT_TGT 1 /* Boot target entry */
|
||||
#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
|
||||
#define DF_FO_MASKED 3
|
||||
|
||||
|
@ -501,6 +504,7 @@ struct scsi_qla_host {
|
|||
#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
|
||||
#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
|
||||
#define AF_LINK_UP 8 /* 0x00000100 */
|
||||
#define AF_LOOPBACK 9 /* 0x00000200 */
|
||||
#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
|
||||
#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
|
||||
#define AF_HA_REMOVAL 12 /* 0x00001000 */
|
||||
|
@ -516,6 +520,8 @@ struct scsi_qla_host {
|
|||
#define AF_8XXX_RST_OWNER 25 /* 0x02000000 */
|
||||
#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
|
||||
#define AF_83XX_NO_FW_DUMP 27 /* 0x08000000 */
|
||||
#define AF_83XX_IOCB_INTR_ON 28 /* 0x10000000 */
|
||||
#define AF_83XX_MBOX_INTR_ON 29 /* 0x20000000 */
|
||||
|
||||
unsigned long dpc_flags;
|
||||
|
||||
|
@ -537,6 +543,7 @@ struct scsi_qla_host {
|
|||
uint32_t tot_ddbs;
|
||||
|
||||
uint16_t iocb_cnt;
|
||||
uint16_t iocb_hiwat;
|
||||
|
||||
/* SRB cache. */
|
||||
#define SRB_MIN_REQ 128
|
||||
|
@@ -838,7 +845,8 @@ static inline int is_aer_supported(struct scsi_qla_host *ha)
static inline int adapter_up(struct scsi_qla_host *ha)
{
return (test_bit(AF_ONLINE, &ha->flags) != 0) &&
(test_bit(AF_LINK_UP, &ha->flags) != 0);
(test_bit(AF_LINK_UP, &ha->flags) != 0) &&
(!test_bit(AF_LOOPBACK, &ha->flags));
}

static inline struct scsi_qla_host* to_qla_host(struct Scsi_Host *shost)
@@ -495,7 +495,7 @@ struct qla_flt_region {
#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
#define MBOX_ASTS_IDC_COMPLETE 0x8100
#define MBOX_ASTS_IDC_NOTIFY 0x8101
#define MBOX_ASTS_IDC_REQUEST_NOTIFICATION 0x8101
#define MBOX_ASTS_TXSCVR_INSERTED 0x8130
#define MBOX_ASTS_TXSCVR_REMOVED 0x8131

@@ -522,6 +522,10 @@ struct qla_flt_region {
#define FLASH_OPT_COMMIT 2
#define FLASH_OPT_RMW_COMMIT 3

/* Loopback type */
#define ENABLE_INTERNAL_LOOPBACK 0x04
#define ENABLE_EXTERNAL_LOOPBACK 0x08

/*************************************************************************/

/* Host Adapter Initialization Control Block (from host) */
@ -253,12 +253,14 @@ void qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha);
|
|||
void qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha);
|
||||
int qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha);
|
||||
void qla4_8xxx_get_minidump(struct scsi_qla_host *ha);
|
||||
int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha);
|
||||
int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha);
|
||||
int qla4_8xxx_intr_disable(struct scsi_qla_host *ha);
|
||||
int qla4_8xxx_intr_enable(struct scsi_qla_host *ha);
|
||||
int qla4_8xxx_set_param(struct scsi_qla_host *ha, int param);
|
||||
int qla4_8xxx_update_idc_reg(struct scsi_qla_host *ha);
|
||||
int qla4_83xx_post_idc_ack(struct scsi_qla_host *ha);
|
||||
void qla4_83xx_disable_pause(struct scsi_qla_host *ha);
|
||||
void qla4_83xx_enable_mbox_intrs(struct scsi_qla_host *ha);
|
||||
int qla4_83xx_can_perform_reset(struct scsi_qla_host *ha);
|
||||
|
||||
extern int ql4xextended_error_logging;
|
||||
extern int ql4xdontresethba;
|
||||
|
|
|
@ -195,12 +195,10 @@ int qla4xxx_get_sys_info(struct scsi_qla_host *ha)
|
|||
* @ha: pointer to host adapter structure.
|
||||
*
|
||||
**/
|
||||
static int qla4xxx_init_local_data(struct scsi_qla_host *ha)
|
||||
static void qla4xxx_init_local_data(struct scsi_qla_host *ha)
|
||||
{
|
||||
/* Initialize aen queue */
|
||||
ha->aen_q_count = MAX_AEN_ENTRIES;
|
||||
|
||||
return qla4xxx_get_firmware_status(ha);
|
||||
}
|
||||
|
||||
static uint8_t
|
||||
|
@ -935,14 +933,23 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset)
|
|||
if (ha->isp_ops->start_firmware(ha) == QLA_ERROR)
|
||||
goto exit_init_hba;
|
||||
|
||||
/*
|
||||
* For ISP83XX, mailbox and IOCB interrupts are enabled separately.
|
||||
* Mailbox interrupts must be enabled prior to issuing any mailbox
|
||||
* command in order to prevent the possibility of losing interrupts
|
||||
* while switching from polling to interrupt mode. IOCB interrupts are
|
||||
* enabled via isp_ops->enable_intrs.
|
||||
*/
|
||||
if (is_qla8032(ha))
|
||||
qla4_83xx_enable_mbox_intrs(ha);
|
||||
|
||||
if (qla4xxx_about_firmware(ha) == QLA_ERROR)
|
||||
goto exit_init_hba;
|
||||
|
||||
if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR)
|
||||
goto exit_init_hba;
|
||||
|
||||
if (qla4xxx_init_local_data(ha) == QLA_ERROR)
|
||||
goto exit_init_hba;
|
||||
qla4xxx_init_local_data(ha);
|
||||
|
||||
status = qla4xxx_init_firmware(ha);
|
||||
if (status == QLA_ERROR)
|
||||
|
|
|
@ -316,7 +316,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
|
|||
goto queuing_error;
|
||||
|
||||
/* total iocbs active */
|
||||
if ((ha->iocb_cnt + req_cnt) >= REQUEST_QUEUE_DEPTH)
|
||||
if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
|
||||
goto queuing_error;
|
||||
|
||||
/* Build command packet */
|
||||
|
|
|
@ -581,6 +581,33 @@ void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
|
|||
set_bit(DPC_RESET_HA, &ha->dpc_flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4_83xx_loopback_in_progress: Is loopback in progress?
|
||||
* @ha: Pointer to host adapter structure.
|
||||
* @ret: 1 = loopback in progress, 0 = loopback not in progress
|
||||
**/
|
||||
static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
|
||||
{
|
||||
int rval = 1;
|
||||
|
||||
if (is_qla8032(ha)) {
|
||||
if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
|
||||
(ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
|
||||
DEBUG2(ql4_printk(KERN_INFO, ha,
|
||||
"%s: Loopback diagnostics in progress\n",
|
||||
__func__));
|
||||
rval = 1;
|
||||
} else {
|
||||
DEBUG2(ql4_printk(KERN_INFO, ha,
|
||||
"%s: Loopback diagnostics not in progress\n",
|
||||
__func__));
|
||||
rval = 0;
|
||||
}
|
||||
}
|
||||
|
||||
return rval;
|
||||
}
|
||||
|
||||
/**
|
||||
* qla4xxx_isr_decode_mailbox - decodes mailbox status
|
||||
* @ha: Pointer to host adapter structure.
|
||||
|
@ -676,8 +703,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
|
|||
|
||||
case MBOX_ASTS_LINK_DOWN:
|
||||
clear_bit(AF_LINK_UP, &ha->flags);
|
||||
if (test_bit(AF_INIT_DONE, &ha->flags))
|
||||
if (test_bit(AF_INIT_DONE, &ha->flags)) {
|
||||
set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
|
||||
qla4xxx_wake_dpc(ha);
|
||||
}
|
||||
|
||||
ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
|
||||
qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
|
||||
|
@ -806,7 +835,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
|
|||
" removed\n", ha->host_no, mbox_sts[0]));
|
||||
break;
|
||||
|
||||
case MBOX_ASTS_IDC_NOTIFY:
|
||||
case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
|
||||
{
|
||||
uint32_t opcode;
|
||||
if (is_qla8032(ha)) {
|
||||
|
@ -840,6 +869,11 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
|
|||
DEBUG2(ql4_printk(KERN_INFO, ha,
|
||||
"scsi:%ld: AEN %04x IDC Complete notification\n",
|
||||
ha->host_no, mbox_sts[0]));
|
||||
|
||||
if (qla4_83xx_loopback_in_progress(ha))
|
||||
set_bit(AF_LOOPBACK, &ha->flags);
|
||||
else
|
||||
clear_bit(AF_LOOPBACK, &ha->flags);
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1124,17 +1158,18 @@ irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
ql4_printk(KERN_ERR, ha,
"%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
__func__);
DEBUG2(ql4_printk(KERN_ERR, ha,
"%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
__func__));
return IRQ_NONE;
}

/* Validate the PCIE function ID set in leg_int_ptr bits [19..16] */
if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
ql4_printk(KERN_ERR, ha,
"%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
__func__, (leg_int_ptr & PF_BITS_MASK), ha->pf_bit);
DEBUG2(ql4_printk(KERN_ERR, ha,
"%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
__func__, (leg_int_ptr & PF_BITS_MASK),
ha->pf_bit));
return IRQ_NONE;
}

@ -1437,11 +1472,14 @@ int qla4xxx_request_irqs(struct scsi_qla_host *ha)
void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
if (test_bit(AF_MSIX_ENABLED, &ha->flags))
qla4_8xxx_disable_msix(ha);
else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
free_irq(ha->pdev->irq, ha);
pci_disable_msi(ha->pdev);
} else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
free_irq(ha->pdev->irq, ha);
if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
qla4_8xxx_disable_msix(ha);
} else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
free_irq(ha->pdev->irq, ha);
pci_disable_msi(ha->pdev);
} else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
free_irq(ha->pdev->irq, ha);
}
}
}

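The rewritten qla4xxx_free_irqs() wraps the whole teardown in test_and_clear_bit(AF_IRQ_ATTACHED, ...), so the resources are released only if an IRQ was actually attached and at most once even if the function is called again (the caller in qla4xxx_free_adapter drops its own guard in a later hunk). A small self-contained sketch of that idempotent-cleanup pattern using C11 atomics; the flag word and bit layout here are invented for illustration, not the driver's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical flag word standing in for ha->flags; bit 0 models the
 * "IRQ attached" state and is atomically tested and cleared. */
static atomic_uint flags = 1u;

static bool test_and_clear_irq_attached(void)
{
    /* Clear bit 0 and report whether it was previously set. */
    return atomic_fetch_and(&flags, ~1u) & 1u;
}

static void free_irqs(void)
{
    if (!test_and_clear_irq_attached()) {
        puts("nothing attached, nothing to free");
        return;
    }
    puts("freeing IRQ resources (runs at most once)");
}

int main(void)
{
    free_irqs(); /* releases the resources */
    free_irqs(); /* second call is now a harmless no-op */
    return 0;
}
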
@ -43,6 +43,30 @@ void qla4xxx_process_mbox_intr(struct scsi_qla_host *ha, int out_count)
}
}

/**
* qla4xxx_is_intr_poll_mode - Are we allowed to poll for interrupts?
* @ha: Pointer to host adapter structure.
* @ret: 1=polling mode, 0=non-polling mode
**/
static int qla4xxx_is_intr_poll_mode(struct scsi_qla_host *ha)
{
int rval = 1;

if (is_qla8032(ha)) {
if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
test_bit(AF_83XX_MBOX_INTR_ON, &ha->flags))
rval = 0;
} else {
if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
test_bit(AF_ONLINE, &ha->flags) &&
!test_bit(AF_HA_REMOVAL, &ha->flags))
rval = 0;
}

return rval;
}

/**
* qla4xxx_mailbox_command - issues mailbox commands
* @ha: Pointer to host adapter structure.

@ -153,33 +177,28 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
/*
* Wait for completion: Poll or completion queue
*/
if (test_bit(AF_IRQ_ATTACHED, &ha->flags) &&
test_bit(AF_INTERRUPTS_ON, &ha->flags) &&
test_bit(AF_ONLINE, &ha->flags) &&
!test_bit(AF_HA_REMOVAL, &ha->flags)) {
/* Do not poll for completion. Use completion queue */
set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
} else {
if (qla4xxx_is_intr_poll_mode(ha)) {
/* Poll for command to complete */
wait_count = jiffies + MBOX_TOV * HZ;
while (test_bit(AF_MBOX_COMMAND_DONE, &ha->flags) == 0) {
if (time_after_eq(jiffies, wait_count))
break;

/*
* Service the interrupt.
* The ISR will save the mailbox status registers
* to a temporary storage location in the adapter
* structure.
*/

spin_lock_irqsave(&ha->hardware_lock, flags);
ha->isp_ops->process_mailbox_interrupt(ha, outCount);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
msleep(10);
}
} else {
/* Do not poll for completion. Use completion queue */
set_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
wait_for_completion_timeout(&ha->mbx_intr_comp, MBOX_TOV * HZ);
clear_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags);
}

/* Check for mailbox timeout. */

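The polling branch above bounds its loop with wait_count = jiffies + MBOX_TOV * HZ and time_after_eq(jiffies, wait_count). The kernel's time_after_eq() is essentially a signed-subtraction comparison, which keeps the deadline check correct even when the jiffies counter wraps. The sketch below emulates that behaviour in userspace with a made-up tick rate and counter; it is an illustration of the comparison trick, not the kernel macro itself.

#include <stdio.h>

/* Wrap-safe deadline check: true when a is at or past b, even if the
 * unsigned counter has wrapped, thanks to the signed subtraction. */
static int deadline_reached(unsigned long a, unsigned long b)
{
    return (long)(a - b) >= 0;
}

int main(void)
{
    unsigned long hz = 100;                      /* pretend tick rate */
    unsigned long ticks = (unsigned long)-50;    /* counter near wraparound */
    unsigned long deadline = ticks + 30 * hz;    /* MBOX_TOV-style timeout */

    for (int step = 0; step < 5; step++) {
        printf("expired: %d\n", deadline_reached(ticks, deadline));
        ticks += 10 * hz;                        /* simulate waiting */
    }
    return 0;
}
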
@ -678,8 +697,24 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
return QLA_ERROR;
}

ql4_printk(KERN_INFO, ha, "%ld firmware IOCBs available (%d).\n",
ha->host_no, mbox_sts[2]);
/* High-water mark of IOCBs */
ha->iocb_hiwat = mbox_sts[2];
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: firmware IOCBs available = %d\n", __func__,
ha->iocb_hiwat));

if (ha->iocb_hiwat > IOCB_HIWAT_CUSHION)
ha->iocb_hiwat -= IOCB_HIWAT_CUSHION;

/* Ideally, we should not enter this code, as the # of firmware
* IOCBs is hard-coded in the firmware. We set a default
* iocb_hiwat here just in case */
if (ha->iocb_hiwat == 0) {
ha->iocb_hiwat = REQUEST_QUEUE_DEPTH / 4;
DEBUG2(ql4_printk(KERN_WARNING, ha,
"%s: Setting IOCB's to = %d\n", __func__,
ha->iocb_hiwat));
}

return QLA_SUCCESS;
}

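This hunk is where the limit used by the send path comes from: the firmware's IOCB count reported in the GET_FW_STATUS mailbox reply, reduced by a small cushion, with a defensive default if the reported value is zero. Below is a hedged standalone sketch of that computation; the constants and the sample inputs are stand-ins rather than the driver's real defines, and real firmware values differ per adapter.

#include <stdio.h>

/* Stand-in values; the real driver takes the count from mbox_sts[2]
 * and uses its own REQUEST_QUEUE_DEPTH and IOCB_HIWAT_CUSHION. */
#define REQUEST_QUEUE_DEPTH 256
#define IOCB_HIWAT_CUSHION  4

static unsigned int compute_iocb_hiwat(unsigned int fw_iocbs)
{
    unsigned int hiwat = fw_iocbs;

    /* Keep a small cushion below the firmware limit. */
    if (hiwat > IOCB_HIWAT_CUSHION)
        hiwat -= IOCB_HIWAT_CUSHION;

    /* Defensive default if the firmware reports zero. */
    if (hiwat == 0)
        hiwat = REQUEST_QUEUE_DEPTH / 4;

    return hiwat;
}

int main(void)
{
    printf("fw=512 -> hiwat=%u\n", compute_iocb_hiwat(512));
    printf("fw=0   -> hiwat=%u\n", compute_iocb_hiwat(0));
    return 0;
}
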
@ -2986,7 +2986,7 @@ int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
retval = qla4_8xxx_device_state_handler(ha);

if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
if (retval == QLA_SUCCESS && !test_bit(AF_IRQ_ATTACHED, &ha->flags))
retval = qla4xxx_request_irqs(ha);

return retval;

@ -3427,11 +3427,11 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
}

/* Make sure we receive the minimum required data to cache internally */
if (mbox_sts[4] < offsetof(struct mbx_sys_info, reserved)) {
if ((is_qla8032(ha) ? mbox_sts[3] : mbox_sts[4]) <
offsetof(struct mbx_sys_info, reserved)) {
DEBUG2(printk("scsi%ld: %s: GET_SYS_INFO data receive"
" error (%x)\n", ha->host_no, __func__, mbox_sts[4]));
goto exit_validate_mac82;

}

/* Save M.A.C. address & serial_number */

@ -3463,7 +3463,7 @@ int qla4_8xxx_get_sys_info(struct scsi_qla_host *ha)
/* Interrupt handling helpers. */

int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
int qla4_8xxx_intr_enable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];

@ -3484,7 +3484,7 @@ int qla4_8xxx_mbx_intr_enable(struct scsi_qla_host *ha)
return QLA_SUCCESS;
}

int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
int qla4_8xxx_intr_disable(struct scsi_qla_host *ha)
{
uint32_t mbox_cmd[MBOX_REG_COUNT];
uint32_t mbox_sts[MBOX_REG_COUNT];

@ -3509,7 +3509,7 @@ int qla4_8xxx_mbx_intr_disable(struct scsi_qla_host *ha)
void
qla4_82xx_enable_intrs(struct scsi_qla_host *ha)
{
qla4_8xxx_mbx_intr_enable(ha);
qla4_8xxx_intr_enable(ha);

spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - reset */

@ -3522,7 +3522,7 @@ void
qla4_82xx_disable_intrs(struct scsi_qla_host *ha)
{
if (test_and_clear_bit(AF_INTERRUPTS_ON, &ha->flags))
qla4_8xxx_mbx_intr_disable(ha);
qla4_8xxx_intr_disable(ha);

spin_lock_irq(&ha->hardware_lock);
/* BIT 10 - set */

@ -1337,18 +1337,18 @@ static int qla4xxx_session_get_param(struct iscsi_cls_session *cls_sess,
sess->password_in, BIDI_CHAP,
&idx);
if (rval)
return -EINVAL;

len = sprintf(buf, "%hu\n", idx);
len = sprintf(buf, "\n");
else
len = sprintf(buf, "%hu\n", idx);
break;
case ISCSI_PARAM_CHAP_OUT_IDX:
rval = qla4xxx_get_chap_index(ha, sess->username,
sess->password, LOCAL_CHAP,
&idx);
if (rval)
return -EINVAL;

len = sprintf(buf, "%hu\n", idx);
len = sprintf(buf, "\n");
else
len = sprintf(buf, "%hu\n", idx);
break;
default:
return iscsi_session_get_param(cls_sess, param, buf);

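The fix above changes the CHAP-index cases so a failed lookup no longer propagates -EINVAL out of the sysfs read; the attribute now prints an empty line and still returns a valid length. A minimal userspace sketch of that show-function shape follows, with a stubbed lookup standing in for qla4xxx_get_chap_index(); names and values here are hypothetical.

#include <stdio.h>

/* Stubbed lookup: non-zero on failure, fills *idx on success. */
static int lookup_chap_index(int fail, unsigned short *idx)
{
    if (fail)
        return -1;
    *idx = 7;
    return 0;
}

/* Shape of the fixed show path: always produce a printable buffer and
 * return its length, instead of returning an error to the reader. */
static int show_chap_idx(char *buf, int fail)
{
    unsigned short idx;
    int len;

    if (lookup_chap_index(fail, &idx))
        len = sprintf(buf, "\n");
    else
        len = sprintf(buf, "%hu\n", idx);

    return len;
}

int main(void)
{
    char buf[16];

    printf("found:   len=%d buf=%s", show_chap_idx(buf, 0), buf);
    printf("missing: len=%d buf=%s", show_chap_idx(buf, 1), buf);
    return 0;
}
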
@ -2242,6 +2242,7 @@ static int qla4xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
test_bit(DPC_HA_NEED_QUIESCENT, &ha->dpc_flags) ||
!test_bit(AF_ONLINE, &ha->flags) ||
!test_bit(AF_LINK_UP, &ha->flags) ||
test_bit(AF_LOOPBACK, &ha->flags) ||
test_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags))
goto qc_host_busy;

@ -2978,6 +2979,7 @@ static int qla4xxx_recover_adapter(struct scsi_qla_host *ha)
if (status == QLA_SUCCESS) {
if (!test_bit(AF_FW_RECOVERY, &ha->flags))
qla4xxx_cmd_wait(ha);

ha->isp_ops->disable_intrs(ha);
qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
qla4xxx_abort_active_cmds(ha, DID_RESET << 16);

@ -3479,7 +3481,8 @@ static void qla4xxx_do_dpc(struct work_struct *work)
}

/* ---- link change? --- */
if (test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LOOPBACK, &ha->flags) &&
test_and_clear_bit(DPC_LINK_CHANGED, &ha->dpc_flags)) {
if (!test_bit(AF_LINK_UP, &ha->flags)) {
/* ---- link down? --- */
qla4xxx_mark_all_devices_missing(ha);

@ -3508,10 +3511,8 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
{
qla4xxx_abort_active_cmds(ha, DID_NO_CONNECT << 16);

if (test_bit(AF_INTERRUPTS_ON, &ha->flags)) {
/* Turn-off interrupts on the card. */
ha->isp_ops->disable_intrs(ha);
}
/* Turn-off interrupts on the card. */
ha->isp_ops->disable_intrs(ha);

if (is_qla40XX(ha)) {
writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),

@ -3547,8 +3548,7 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
}

/* Detach interrupts */
if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
qla4xxx_free_irqs(ha);
qla4xxx_free_irqs(ha);

/* free extra memory */
qla4xxx_mem_free(ha);

@ -4687,7 +4687,8 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
struct iscsi_endpoint *ep;
struct sockaddr_in *addr;
struct sockaddr_in6 *addr6;
struct sockaddr *dst_addr;
struct sockaddr *t_addr;
struct sockaddr_storage *dst_addr;
char *ip;

/* TODO: need to destroy on unload iscsi_endpoint*/

@ -4696,21 +4697,23 @@ static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
return NULL;

if (fw_ddb_entry->options & DDB_OPT_IPV6_DEVICE) {
dst_addr->sa_family = AF_INET6;
t_addr = (struct sockaddr *)dst_addr;
t_addr->sa_family = AF_INET6;
addr6 = (struct sockaddr_in6 *)dst_addr;
ip = (char *)&addr6->sin6_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IPv6_ADDR_LEN);
addr6->sin6_port = htons(le16_to_cpu(fw_ddb_entry->port));

} else {
dst_addr->sa_family = AF_INET;
t_addr = (struct sockaddr *)dst_addr;
t_addr->sa_family = AF_INET;
addr = (struct sockaddr_in *)dst_addr;
ip = (char *)&addr->sin_addr;
memcpy(ip, fw_ddb_entry->ip_addr, IP_ADDR_LEN);
addr->sin_port = htons(le16_to_cpu(fw_ddb_entry->port));
}

ep = qla4xxx_ep_connect(ha->host, dst_addr, 0);
ep = qla4xxx_ep_connect(ha->host, (struct sockaddr *)dst_addr, 0);
vfree(dst_addr);
return ep;
}

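In the fixed version dst_addr is a struct sockaddr_storage, which is large and aligned enough to hold either an IPv4 or an IPv6 destination, and it is only cast to struct sockaddr * at the qla4xxx_ep_connect() call. A self-contained userspace sketch of the same pattern is below; the helper name, sample address, and port are made up for illustration (3260 is simply the customary iSCSI port).

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Fill a sockaddr_storage with either an IPv4 or IPv6 destination and
 * hand it on as a generic struct sockaddr *, as the fixed code does. */
static void fill_dst(struct sockaddr_storage *dst, const char *ip, int v6,
                     unsigned short port)
{
    memset(dst, 0, sizeof(*dst));
    if (v6) {
        struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)dst;
        a6->sin6_family = AF_INET6;
        a6->sin6_port = htons(port);
        inet_pton(AF_INET6, ip, &a6->sin6_addr);
    } else {
        struct sockaddr_in *a4 = (struct sockaddr_in *)dst;
        a4->sin_family = AF_INET;
        a4->sin_port = htons(port);
        inet_pton(AF_INET, ip, &a4->sin_addr);
    }
}

int main(void)
{
    struct sockaddr_storage dst;
    struct sockaddr *sa = (struct sockaddr *)&dst;
    char text[INET6_ADDRSTRLEN];

    fill_dst(&dst, "2001:db8::1", 1, 3260);
    if (sa->sa_family == AF_INET6) {
        struct sockaddr_in6 *a6 = (struct sockaddr_in6 *)sa;
        inet_ntop(AF_INET6, &a6->sin6_addr, text, sizeof(text));
        printf("connect to [%s]:%u\n", text, ntohs(a6->sin6_port));
    }
    return 0;
}
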
@ -4725,7 +4728,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
}

static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
struct ddb_entry *ddb_entry)
struct ddb_entry *ddb_entry,
uint16_t idx)
{
uint16_t def_timeout;

@ -4745,6 +4749,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
def_timeout : LOGIN_TOV;
ddb_entry->default_time2wait =
le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);

if (ql4xdisablesysfsboot &&
(idx == ha->pri_ddb_idx || idx == ha->sec_ddb_idx))
set_bit(DF_BOOT_TGT, &ddb_entry->flags);
}

static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)

@ -4881,7 +4889,7 @@ static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
struct dev_db_entry *fw_ddb_entry,
int is_reset)
int is_reset, uint16_t idx)
{
struct iscsi_cls_session *cls_sess;
struct iscsi_session *sess;

@ -4919,7 +4927,7 @@ static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
sizeof(struct dev_db_entry));

qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
qla4xxx_setup_flash_ddb_entry(ha, ddb_entry, idx);

cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);

@ -5036,7 +5044,7 @@ static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
goto continue_next_nt;
}

ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset, idx);
if (ret == QLA_ERROR)
goto exit_nt_list;

@ -5115,6 +5123,78 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
qla4xxx_free_ddb_index(ha);
}

/**
* qla4xxx_wait_login_resp_boot_tgt - Wait for iSCSI boot target login
* response.
* @ha: pointer to adapter structure
*
* When the boot entry is normal iSCSI target then DF_BOOT_TGT flag will be
* set in DDB and we will wait for login response of boot targets during
* probe.
**/
static void qla4xxx_wait_login_resp_boot_tgt(struct scsi_qla_host *ha)
{
struct ddb_entry *ddb_entry;
struct dev_db_entry *fw_ddb_entry = NULL;
dma_addr_t fw_ddb_entry_dma;
unsigned long wtime;
uint32_t ddb_state;
int max_ddbs, idx, ret;

max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
MAX_DEV_DB_ENTRIES;

fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
&fw_ddb_entry_dma, GFP_KERNEL);
if (!fw_ddb_entry) {
ql4_printk(KERN_ERR, ha,
"%s: Unable to allocate dma buffer\n", __func__);
goto exit_login_resp;
}

wtime = jiffies + (HZ * BOOT_LOGIN_RESP_TOV);

for (idx = 0; idx < max_ddbs; idx++) {
ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, idx);
if (ddb_entry == NULL)
continue;

if (test_bit(DF_BOOT_TGT, &ddb_entry->flags)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: DDB index [%d]\n", __func__,
ddb_entry->fw_ddb_index));
do {
ret = qla4xxx_get_fwddb_entry(ha,
ddb_entry->fw_ddb_index,
fw_ddb_entry, fw_ddb_entry_dma,
NULL, NULL, &ddb_state, NULL,
NULL, NULL);
if (ret == QLA_ERROR)
goto exit_login_resp;

if ((ddb_state == DDB_DS_SESSION_ACTIVE) ||
(ddb_state == DDB_DS_SESSION_FAILED))
break;

schedule_timeout_uninterruptible(HZ);

} while ((time_after(wtime, jiffies)));

if (!time_after(wtime, jiffies)) {
DEBUG2(ql4_printk(KERN_INFO, ha,
"%s: Login response wait timer expired\n",
__func__));
goto exit_login_resp;
}
}
}

exit_login_resp:
if (fw_ddb_entry)
dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
fw_ddb_entry, fw_ddb_entry_dma);
}

/**
* qla4xxx_probe_adapter - callback function to probe HBA
* @pdev: pointer to pci_dev structure

@ -5270,7 +5350,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
if (is_qla80XX(ha)) {
ha->isp_ops->idc_lock(ha);
dev_state = qla4_8xxx_rd_direct(ha,
QLA82XX_CRB_DEV_STATE);
QLA8XXX_CRB_DEV_STATE);
ha->isp_ops->idc_unlock(ha);
if (dev_state == QLA8XXX_DEV_FAILED) {
ql4_printk(KERN_WARNING, ha, "%s: don't retry "

@ -5368,6 +5448,7 @@ static int qla4xxx_probe_adapter(struct pci_dev *pdev,
/* Perform the build ddb list and login to each */
qla4xxx_build_ddb_list(ha, INIT_ADAPTER);
iscsi_host_for_each_session(ha->host, qla4xxx_login_flash_ddb);
qla4xxx_wait_login_resp_boot_tgt(ha);

qla4xxx_create_chap_list(ha);

@ -6008,14 +6089,6 @@ static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type)
goto exit_host_reset;
}

rval = qla4xxx_wait_for_hba_online(ha);
if (rval != QLA_SUCCESS) {
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unable to reset host "
"adapter\n", __func__));
rval = -EIO;
goto exit_host_reset;
}

if (test_bit(DPC_RESET_HA, &ha->dpc_flags))
goto recover_adapter;

@ -6115,7 +6188,6 @@ qla4xxx_pci_mmio_enabled(struct pci_dev *pdev)
static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
{
uint32_t rval = QLA_ERROR;
uint32_t ret = 0;
int fn;
struct pci_dev *other_pdev = NULL;

@ -6201,16 +6273,7 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
qla4_8xxx_wr_direct(ha, QLA8XXX_CRB_DRV_STATE, 0);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->idc_unlock(ha);
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to "
"reserve interrupt %d already in use.\n",
ha->pdev->irq);
rval = QLA_ERROR;
} else {
ha->isp_ops->enable_intrs(ha);
rval = QLA_SUCCESS;
}
ha->isp_ops->enable_intrs(ha);
}
} else {
ql4_printk(KERN_INFO, ha, "scsi%ld: %s: devfn 0x%x is not "

@ -6220,18 +6283,9 @@ static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
QLA8XXX_DEV_READY)) {
clear_bit(AF_FW_RECOVERY, &ha->flags);
rval = qla4xxx_initialize_adapter(ha, RESET_ADAPTER);
if (rval == QLA_SUCCESS) {
ret = qla4xxx_request_irqs(ha);
if (ret) {
ql4_printk(KERN_WARNING, ha, "Failed to"
" reserve interrupt %d already in"
" use.\n", ha->pdev->irq);
rval = QLA_ERROR;
} else {
ha->isp_ops->enable_intrs(ha);
rval = QLA_SUCCESS;
}
}
if (rval == QLA_SUCCESS)
ha->isp_ops->enable_intrs(ha);

ha->isp_ops->idc_lock(ha);
qla4_8xxx_set_drv_active(ha);
ha->isp_ops->idc_unlock(ha);

@ -5,4 +5,4 @@
* See LICENSE.qla4xxx for copyright and licensing details.
*/

#define QLA4XXX_DRIVER_VERSION "5.03.00-k1"
#define QLA4XXX_DRIVER_VERSION "5.03.00-k4"

@ -2503,6 +2503,15 @@ show_priv_session_creator(struct device *dev, struct device_attribute *attr,
}
static ISCSI_CLASS_ATTR(priv_sess, creator, S_IRUGO, show_priv_session_creator,
NULL);
static ssize_t
show_priv_session_target_id(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct iscsi_cls_session *session = iscsi_dev_to_session(dev->parent);
return sprintf(buf, "%d\n", session->target_id);
}
static ISCSI_CLASS_ATTR(priv_sess, target_id, S_IRUGO,
show_priv_session_target_id, NULL);

#define iscsi_priv_session_attr_show(field, format) \
static ssize_t \

@ -2575,6 +2584,7 @@ static struct attribute *iscsi_session_attrs[] = {
&dev_attr_priv_sess_creator.attr,
&dev_attr_sess_chap_out_idx.attr,
&dev_attr_sess_chap_in_idx.attr,
&dev_attr_priv_sess_target_id.attr,
NULL,
};

@ -2638,6 +2648,8 @@ static umode_t iscsi_session_attr_is_visible(struct kobject *kobj,
return S_IRUGO;
else if (attr == &dev_attr_priv_sess_creator.attr)
return S_IRUGO;
else if (attr == &dev_attr_priv_sess_target_id.attr)
return S_IRUGO;
else {
WARN_ONCE(1, "Invalid session attr");
return 0;