mirror of https://gitee.com/openkylin/linux.git
qed*: Utilize FW 8.33.1.0
Advance the qed* drivers to use firmware 8.33.1.0:
 - Modify core driver (qed) to utilize the new FW and initialize the device
   with it. This is the lion's share of the patch, and includes changes to
   FW interface files, device initialization flows, FW interaction flows,
   and debug collection flows.
 - Modify Ethernet driver (qede) to make use of new FW in fastpath.
 - Modify RoCE/iWARP driver (qedr) to make use of new FW in fastpath.
 - Modify FCoE driver (qedf) to make use of new FW in fastpath.
 - Modify iSCSI driver (qedi) to make use of new FW in fastpath.

Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Yuval Bason <Yuval.Bason@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Manish Chopra <Manish.Chopra@cavium.com>
Signed-off-by: Chad Dupuis <Chad.Dupuis@cavium.com>
Signed-off-by: Manish Rangankar <Manish.Rangankar@cavium.com>
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent 21dd79e82f
commit da09091732
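As context for the scope of this change: moving to a new firmware means the qed core is rebuilt against the matching hardware-software interface (HSI) headers, and the firmware revision the driver expects is pinned by a handful of version constants. A minimal sketch of what the 8.33.1.0 bump looks like follows; the macro names and the common_hsi.h location are assumptions based on the qed HSI headers, shown purely as an illustration and not as lines taken from this diff:

	/* include/linux/qed/common_hsi.h -- illustrative excerpt (assumed location) */
	#define FW_MAJOR_VERSION	8	/* firmware 8.x   */
	#define FW_MINOR_VERSION	33	/* firmware x.33  */
	#define FW_REVISION_VERSION	1	/* firmware x.x.1 */
	#define FW_ENGINEERING_VERSION	0	/* firmware x.x.x.0 */

The bulk of the patch below then updates the interface structures, ramrods, and init flows in each driver so they match what this firmware revision expects.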
@@ -164,6 +164,13 @@ struct rdma_srq_sge {
	__le32 l_key;
};

/* Rdma doorbell data for flags update */
struct rdma_pwm_flags_data {
	__le16 icid; /* internal CID */
	u8 agg_flags; /* aggregative flags */
	u8 reserved;
};

/* Rdma doorbell data for SQ and RQ */
struct rdma_pwm_val16_data {
	__le16 icid;
@@ -184,8 +191,12 @@ struct rdma_pwm_val32_data {
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK 0x1
#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT 3
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK 0x1
#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT 4
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x7
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 5
	__le32 value;
};
@@ -492,8 +503,10 @@ struct rdma_sq_fmr_wqe {
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1
#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
	__le32 reserved5;
};
@@ -572,8 +585,10 @@ struct rdma_sq_fmr_wqe_3rd {
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
	__le32 reserved5;
};
@@ -618,8 +633,10 @@ struct rdma_sq_rdma_wqe {
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT 4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x3
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 6
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
	u8 wqe_size;
	u8 prev_wqe_size;
	struct regpair remote_va;
@@ -85,6 +85,7 @@ config QED
	tristate "QLogic QED 25/40/100Gb core driver"
	depends on PCI
	select ZLIB_INFLATE
	select CRC8
	---help---
	  This enables the support for ...
@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
	return sw_fid;
}

#define PURE_LB_TC 8
#define PKT_LB_TC 9
#define MAX_NUM_VOQS_E4 20

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
	p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);

	qed_cxt_qm_iids(p_hwfn, &qm_iids);
	total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
	total = qed_qm_pf_mem_size(qm_iids.cids,
				   qm_iids.vf_cids, qm_iids.tids,
				   p_hwfn->qm_info.num_pqs,
				   p_hwfn->qm_info.num_vf_pqs);
@@ -1496,20 +1496,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
	}
}

void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, bool is_pf_loading)
{
	struct qed_qm_pf_rt_init_params params;
	struct qed_qm_info *qm_info = &p_hwfn->qm_info;
	struct qed_qm_pf_rt_init_params params;
	struct qed_mcp_link_state *p_link;
	struct qed_qm_iids iids;

	memset(&iids, 0, sizeof(iids));
	qed_cxt_qm_iids(p_hwfn, &iids);

	p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;

	memset(&params, 0, sizeof(params));
	params.port_id = p_hwfn->port_id;
	params.pf_id = p_hwfn->rel_pf_id;
	params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
	params.is_first_pf = p_hwfn->first_on_engine;
	params.is_pf_loading = is_pf_loading;
	params.num_pf_cids = iids.cids;
	params.num_vf_cids = iids.vf_cids;
	params.num_tids = iids.tids;
@@ -1520,6 +1524,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
	params.num_vports = qm_info->num_vports;
	params.pf_wfq = qm_info->pf_wfq;
	params.pf_rl = qm_info->pf_rl;
	params.link_speed = p_link->speed;
	params.pq_params = qm_info->qm_pq_params;
	params.vport_params = qm_info->qm_vport_params;
@@ -1883,7 +1888,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)

void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_qm_init_pf(p_hwfn, p_ptt);
	qed_qm_init_pf(p_hwfn, p_ptt, true);
	qed_cm_init_pf(p_hwfn);
	qed_dq_init_pf(p_hwfn);
	qed_cdu_init_pf(p_hwfn);
@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 *
 * @param p_hwfn
 * @param p_ptt
 * @param is_pf_loading
 */
void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, bool is_pf_loading);

/**
 * @brief Reconfigures QM pf on the fly
@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
				   struct pf_update_ramrod_data *p_dest)
{
	struct protocol_dcb_data *p_dcb_data;
	bool update_flag = false;

	p_dest->pf_id = p_src->pf_id;
	u8 update_flag;

	update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
	p_dest->update_fcoe_dcb_data_mode = update_flag;
File diff suppressed because it is too large
@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
	qed_init_clear_rt_data(p_hwfn);

	/* prepare QM portion of runtime array */
	qed_qm_init_pf(p_hwfn, p_ptt);
	qed_qm_init_pf(p_hwfn, p_ptt, false);

	/* activate init tool on runtime array */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
	if (rc)
		return rc;

	/* Sanity check before the PF init sequence that uses DMAE */
	rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
	if (rc)
		return rc;

	/* PF Init sequence */
	rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
	if (rc)
@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
		/* No need for a case for QED_CMDQS_CQS since
		 * CNQ/CMDQS are the same resource.
		 */
		resc_max_val = NUM_OF_CMDQS_CQS;
		resc_max_val = NUM_OF_GLOBAL_QUEUES;
		break;
	case QED_RDMA_STATS_QUEUE:
		resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		*p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
		*p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
		break;
	case QED_RDMA_STATS_QUEUE:
		*p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
File diff suppressed because it is too large
@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
	return rc;
}

int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, const char *phase)
{
	u32 size = PAGE_SIZE / 2, val;
	struct qed_dmae_params params;
	int rc = 0;
	dma_addr_t p_phys;
	void *p_virt;
	u32 *p_tmp;

	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    2 * size, &p_phys, GFP_KERNEL);
	if (!p_virt) {
		DP_NOTICE(p_hwfn,
			  "DMAE sanity [%s]: failed to allocate memory\n",
			  phase);
		return -ENOMEM;
	}

	/* Fill the bottom half of the allocated memory with a known pattern */
	for (p_tmp = (u32 *)p_virt;
	     p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
		/* Save the address itself as the value */
		val = (u32)(uintptr_t)p_tmp;
		*p_tmp = val;
	}

	/* Zero the top half of the allocated memory */
	memset((u8 *)p_virt + size, 0, size);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
		   phase,
		   (u64)p_phys,
		   p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);

	memset(&params, 0, sizeof(params));
	rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
				size / 4 /* size_in_dwords */, &params);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
			  phase, rc);
		goto out;
	}

	/* Verify that the top half of the allocated memory has the pattern */
	for (p_tmp = (u32 *)((u8 *)p_virt + size);
	     p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
		/* The corresponding address in the bottom half */
		val = (u32)(uintptr_t)p_tmp - size;

		if (*p_tmp != val) {
			DP_NOTICE(p_hwfn,
				  "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
				  phase,
				  (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
				  p_tmp, *p_tmp, val);
			rc = -EINVAL;
			goto out;
		}
	}

out:
	dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
	return rc;
}
@@ -299,4 +299,8 @@ union qed_qm_pq_params {

int qed_init_fw_data(struct qed_dev *cdev,
		     const u8 *fw_data);

int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, const char *phase);

#endif
File diff suppressed because it is too large
@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
|
|||
}
|
||||
|
||||
/* init_ops callbacks entry point */
|
||||
static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
struct init_callback_op *p_cmd)
|
||||
static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
struct init_callback_op *p_cmd)
|
||||
{
|
||||
DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
|
||||
int rc;
|
||||
|
||||
switch (p_cmd->callback_id) {
|
||||
case DMAE_READY_CB:
|
||||
rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
|
||||
break;
|
||||
default:
|
||||
DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
|
||||
p_cmd->callback_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
|
||||
|
@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
|
|||
break;
|
||||
|
||||
case INIT_OP_CALLBACK:
|
||||
qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
|
||||
rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -89,7 +89,7 @@ struct qed_iscsi_conn {
|
|||
u8 local_mac[6];
|
||||
u8 remote_mac[6];
|
||||
u16 vlan_id;
|
||||
u8 tcp_flags;
|
||||
u16 tcp_flags;
|
||||
u8 ip_version;
|
||||
u32 remote_ip[4];
|
||||
u32 local_ip[4];
|
||||
|
@ -106,7 +106,6 @@ struct qed_iscsi_conn {
|
|||
u32 ss_thresh;
|
||||
u16 srtt;
|
||||
u16 rtt_var;
|
||||
u32 ts_time;
|
||||
u32 ts_recent;
|
||||
u32 ts_recent_age;
|
||||
u32 total_rt;
|
||||
|
@ -128,7 +127,6 @@ struct qed_iscsi_conn {
|
|||
u16 mss;
|
||||
u8 snd_wnd_scale;
|
||||
u8 rcv_wnd_scale;
|
||||
u32 ts_ticks_per_second;
|
||||
u16 da_timeout_value;
|
||||
u8 ack_frequency;
|
||||
|
||||
|
@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
|
|||
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
|
||||
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
|
||||
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
|
||||
p_init->ooo_enable = p_params->ooo_enable;
|
||||
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
|
||||
p_params->ll2_ooo_queue_id;
|
||||
|
||||
p_init->func_params.log_page_size = p_params->log_page_size;
|
||||
val = p_params->num_tasks;
|
||||
p_init->func_params.num_tasks = cpu_to_le16(val);
|
||||
|
@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
|
|||
|
||||
p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
|
||||
|
||||
p_tcp->flags = p_conn->tcp_flags;
|
||||
p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
|
||||
p_tcp->ip_version = p_conn->ip_version;
|
||||
for (i = 0; i < 4; i++) {
|
||||
dval = p_conn->remote_ip[i];
|
||||
|
@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
|
|||
p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
|
||||
|
||||
p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
|
||||
p_tcp2->flags = p_conn->tcp_flags;
|
||||
p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
|
||||
|
||||
p_tcp2->ip_version = p_conn->ip_version;
|
||||
for (i = 0; i < 4; i++) {
|
||||
|
@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
|
|||
p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
|
||||
p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
|
||||
p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
|
||||
p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
|
||||
p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
|
||||
p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
|
||||
p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
|
||||
p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
|
||||
}
|
||||
|
||||
return qed_spq_post(p_hwfn, p_ent, NULL);
|
||||
|
@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
|
|||
}
|
||||
}
|
||||
|
||||
static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
|
||||
struct qed_iscsi_conn *p_conn)
|
||||
static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
|
||||
{
|
||||
if (!p_conn->queue_cnts_virt_addr)
|
||||
goto nomem;
|
||||
|
@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
|
|||
rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
|
||||
|
||||
if (!rc)
|
||||
rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
|
||||
rc = qed_iscsi_setup_connection(p_conn);
|
||||
|
||||
if (rc) {
|
||||
spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
|
||||
|
@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
|
|||
con->ss_thresh = conn_info->ss_thresh;
|
||||
con->srtt = conn_info->srtt;
|
||||
con->rtt_var = conn_info->rtt_var;
|
||||
con->ts_time = conn_info->ts_time;
|
||||
con->ts_recent = conn_info->ts_recent;
|
||||
con->ts_recent_age = conn_info->ts_recent_age;
|
||||
con->total_rt = conn_info->total_rt;
|
||||
|
@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
|
|||
con->mss = conn_info->mss;
|
||||
con->snd_wnd_scale = conn_info->snd_wnd_scale;
|
||||
con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
|
||||
con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
|
||||
con->da_timeout_value = conn_info->da_timeout_value;
|
||||
con->ack_frequency = conn_info->ack_frequency;
|
||||
|
||||
|
|
|
@ -64,14 +64,21 @@ struct mpa_v2_hdr {
|
|||
|
||||
#define QED_IWARP_INVALID_TCP_CID 0xffffffff
|
||||
#define QED_IWARP_RCV_WND_SIZE_DEF (256 * 1024)
|
||||
#define QED_IWARP_RCV_WND_SIZE_MIN (64 * 1024)
|
||||
#define QED_IWARP_RCV_WND_SIZE_MIN (0xffff)
|
||||
#define TIMESTAMP_HEADER_SIZE (12)
|
||||
#define QED_IWARP_MAX_FIN_RT_DEFAULT (2)
|
||||
|
||||
#define QED_IWARP_TS_EN BIT(0)
|
||||
#define QED_IWARP_DA_EN BIT(1)
|
||||
#define QED_IWARP_PARAM_CRC_NEEDED (1)
|
||||
#define QED_IWARP_PARAM_P2P (1)
|
||||
|
||||
#define QED_IWARP_DEF_MAX_RT_TIME (0)
|
||||
#define QED_IWARP_DEF_CWND_FACTOR (4)
|
||||
#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
|
||||
#define QED_IWARP_DEF_KA_TIMEOUT (1200000) /* 20 min */
|
||||
#define QED_IWARP_DEF_KA_INTERVAL (1000) /* 1 sec */
|
||||
|
||||
static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
|
||||
u8 fw_event_code, u16 echo,
|
||||
union event_ring_data *data,
|
||||
|
@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
|
|||
spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
|
||||
}
|
||||
|
||||
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
|
||||
struct iwarp_init_func_params *p_ramrod)
|
||||
void
|
||||
qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
|
||||
struct iwarp_init_func_ramrod_data *p_ramrod)
|
||||
{
|
||||
p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
|
||||
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
|
||||
p_ramrod->iwarp.ll2_ooo_q_index =
|
||||
RESC_START(p_hwfn, QED_LL2_QUEUE) +
|
||||
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
|
||||
|
||||
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
|
||||
|
@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
|
|||
tcp->ttl = 0x40;
|
||||
tcp->tos_or_tc = 0;
|
||||
|
||||
tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
|
||||
tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
|
||||
tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
|
||||
tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
|
||||
tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
|
||||
|
||||
tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
|
||||
tcp->connect_mode = ep->connect_mode;
|
||||
|
||||
|
@ -807,6 +826,7 @@ static int
|
|||
qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
|
||||
{
|
||||
struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
|
||||
struct qed_iwarp_info *iwarp_info;
|
||||
struct qed_sp_init_data init_data;
|
||||
dma_addr_t async_output_phys;
|
||||
struct qed_spq_entry *p_ent;
|
||||
|
@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
|
|||
p_mpa_ramrod->common.reject = 1;
|
||||
}
|
||||
|
||||
iwarp_info = &p_hwfn->p_rdma_info->iwarp;
|
||||
p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
|
||||
p_mpa_ramrod->mode = ep->mpa_rev;
|
||||
SET_FIELD(p_mpa_ramrod->rtr_pref,
|
||||
IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
|
||||
|
@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
|||
/* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
|
||||
iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
|
||||
ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
|
||||
iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
|
||||
iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
|
||||
iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
|
||||
|
||||
|
|
|
@ -95,6 +95,7 @@ struct qed_iwarp_info {
|
|||
spinlock_t iw_lock; /* for iwarp resources */
|
||||
spinlock_t qp_lock; /* for teardown races */
|
||||
u32 rcv_wnd_scale;
|
||||
u16 rcv_wnd_size;
|
||||
u16 max_mtu;
|
||||
u8 mac_addr[ETH_ALEN];
|
||||
u8 crc_needed;
|
||||
|
@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
|||
struct qed_rdma_start_in_params *params);
|
||||
|
||||
void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
|
||||
struct iwarp_init_func_params *p_ramrod);
|
||||
struct iwarp_init_func_ramrod_data *p_ramrod);
|
||||
|
||||
int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
|
||||
|
||||
|
|
|
@ -1969,33 +1969,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
|
|||
_qed_get_vport_stats(cdev, cdev->reset_stats);
|
||||
}
|
||||
|
||||
static void
|
||||
qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
||||
struct qed_arfs_config_params *p_cfg_params)
|
||||
static enum gft_profile_type
|
||||
qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
|
||||
{
|
||||
if (p_cfg_params->arfs_enable) {
|
||||
qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
|
||||
p_cfg_params->tcp, p_cfg_params->udp,
|
||||
p_cfg_params->ipv4, p_cfg_params->ipv6);
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_SP,
|
||||
"tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
|
||||
if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
|
||||
return GFT_PROFILE_TYPE_4_TUPLE;
|
||||
if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
|
||||
return GFT_PROFILE_TYPE_IP_DST_PORT;
|
||||
return GFT_PROFILE_TYPE_L4_DST_PORT;
|
||||
}
|
||||
|
||||
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
struct qed_arfs_config_params *p_cfg_params)
|
||||
{
|
||||
if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
|
||||
qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
|
||||
p_cfg_params->tcp,
|
||||
p_cfg_params->udp,
|
||||
p_cfg_params->ipv4,
|
||||
p_cfg_params->ipv6,
|
||||
qed_arfs_mode_to_hsi(p_cfg_params->mode));
|
||||
DP_VERBOSE(p_hwfn,
|
||||
QED_MSG_SP,
|
||||
"Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
|
||||
p_cfg_params->tcp ? "Enable" : "Disable",
|
||||
p_cfg_params->udp ? "Enable" : "Disable",
|
||||
p_cfg_params->ipv4 ? "Enable" : "Disable",
|
||||
p_cfg_params->ipv6 ? "Enable" : "Disable");
|
||||
p_cfg_params->ipv6 ? "Enable" : "Disable",
|
||||
(u32)p_cfg_params->mode);
|
||||
} else {
|
||||
qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
|
||||
qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
|
||||
}
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
|
||||
p_cfg_params->arfs_enable ? "Enable" : "Disable");
|
||||
}
|
||||
|
||||
static int
|
||||
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
||||
int
|
||||
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
|
||||
struct qed_spq_comp_cb *p_cb,
|
||||
dma_addr_t p_addr, u16 length, u16 qid,
|
||||
u8 vport_id, bool b_is_add)
|
||||
struct qed_ntuple_filter_params *p_params)
|
||||
{
|
||||
struct rx_update_gft_filter_data *p_ramrod = NULL;
|
||||
struct qed_spq_entry *p_ent = NULL;
|
||||
|
@ -2004,13 +2016,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
|||
u8 abs_vport_id = 0;
|
||||
int rc = -EINVAL;
|
||||
|
||||
rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
|
||||
rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
|
||||
if (rc)
|
||||
return rc;
|
||||
|
||||
rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
|
||||
if (rc)
|
||||
return rc;
|
||||
if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
|
||||
rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
||||
/* Get SPQ entry */
|
||||
memset(&init_data, 0, sizeof(init_data));
|
||||
|
@ -2032,17 +2046,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
|
|||
return rc;
|
||||
|
||||
p_ramrod = &p_ent->ramrod.rx_update_gft;
|
||||
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
|
||||
p_ramrod->pkt_hdr_length = cpu_to_le16(length);
|
||||
p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
|
||||
p_ramrod->vport_id = abs_vport_id;
|
||||
p_ramrod->filter_type = RFS_FILTER_TYPE;
|
||||
p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
|
||||
|
||||
DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
|
||||
p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
|
||||
|
||||
if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
|
||||
p_ramrod->rx_qid_valid = 1;
|
||||
p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
|
||||
}
|
||||
|
||||
p_ramrod->flow_id_valid = 0;
|
||||
p_ramrod->flow_id = 0;
|
||||
|
||||
p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
|
||||
p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
|
||||
: GFT_DELETE_FILTER;
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_SP,
|
||||
"V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
|
||||
abs_vport_id, abs_rx_q_id,
|
||||
b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
|
||||
p_params->b_is_add ? "Adding" : "Removing",
|
||||
(u64)p_params->addr, p_params->length);
|
||||
|
||||
return qed_spq_post(p_hwfn, p_ent, NULL);
|
||||
}
|
||||
|
@ -2743,7 +2767,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
|
|||
}
|
||||
}
|
||||
|
||||
static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
|
||||
static int qed_configure_arfs_searcher(struct qed_dev *cdev,
|
||||
enum qed_filter_config_mode mode)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
|
||||
struct qed_arfs_config_params arfs_config_params;
|
||||
|
@ -2753,8 +2778,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
|
|||
arfs_config_params.udp = true;
|
||||
arfs_config_params.ipv4 = true;
|
||||
arfs_config_params.ipv6 = true;
|
||||
arfs_config_params.arfs_enable = en_searcher;
|
||||
|
||||
arfs_config_params.mode = mode;
|
||||
qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
|
||||
&arfs_config_params);
|
||||
return 0;
|
||||
|
@ -2762,8 +2786,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
|
|||
|
||||
static void
|
||||
qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
|
||||
void *cookie, union event_ring_data *data,
|
||||
u8 fw_return_code)
|
||||
void *cookie,
|
||||
union event_ring_data *data, u8 fw_return_code)
|
||||
{
|
||||
struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
|
||||
void *dev = p_hwfn->cdev->ops_cookie;
|
||||
|
@ -2771,10 +2795,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
|
|||
op->arfs_filter_op(dev, cookie, fw_return_code);
|
||||
}
|
||||
|
||||
static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
|
||||
dma_addr_t mapping, u16 length,
|
||||
u16 vport_id, u16 rx_queue_id,
|
||||
bool add_filter)
|
||||
static int
|
||||
qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
|
||||
void *cookie,
|
||||
struct qed_ntuple_filter_params *params)
|
||||
{
|
||||
struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
|
||||
struct qed_spq_comp_cb cb;
|
||||
|
@ -2783,9 +2807,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
|
|||
cb.function = qed_arfs_sp_response_handler;
|
||||
cb.cookie = cookie;
|
||||
|
||||
rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
|
||||
&cb, mapping, length, rx_queue_id,
|
||||
vport_id, add_filter);
|
||||
if (params->b_is_vf) {
|
||||
if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
|
||||
false)) {
|
||||
DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
|
||||
params->vf_id);
|
||||
return rc;
|
||||
}
|
||||
|
||||
params->vport_id = params->vf_id + 1;
|
||||
params->qid = QED_RFS_NTUPLE_QID_RSS;
|
||||
}
|
||||
|
||||
rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
|
||||
if (rc)
|
||||
DP_NOTICE(p_hwfn,
|
||||
"Failed to issue a-RFS filter configuration\n");
|
||||
|
|
|
@ -190,7 +190,7 @@ struct qed_arfs_config_params {
|
|||
bool udp;
|
||||
bool ipv4;
|
||||
bool ipv6;
|
||||
bool arfs_enable;
|
||||
enum qed_filter_config_mode mode;
|
||||
};
|
||||
|
||||
struct qed_sp_vport_update_params {
|
||||
|
@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
|
|||
|
||||
void qed_reset_vport_stats(struct qed_dev *cdev);
|
||||
|
||||
/**
|
||||
* *@brief qed_arfs_mode_configure -
|
||||
*
|
||||
**Enable or disable rfs mode. It must accept atleast one of tcp or udp true
|
||||
**and atleast one of ipv4 or ipv6 true to enable rfs mode.
|
||||
*
|
||||
**@param p_hwfn
|
||||
**@param p_ptt
|
||||
**@param p_cfg_params - arfs mode configuration parameters.
|
||||
*
|
||||
*/
|
||||
void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt,
|
||||
struct qed_arfs_config_params *p_cfg_params);
|
||||
|
||||
/**
|
||||
* @brief - qed_configure_rfs_ntuple_filter
|
||||
*
|
||||
* This ramrod should be used to add or remove arfs hw filter
|
||||
*
|
||||
* @params p_hwfn
|
||||
* @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
|
||||
* it with cookie and callback function address, if not
|
||||
* using this mode then client must pass NULL.
|
||||
* @params p_params
|
||||
*/
|
||||
int
|
||||
qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
|
||||
struct qed_spq_comp_cb *p_cb,
|
||||
struct qed_ntuple_filter_params *p_params);
|
||||
|
||||
#define MAX_QUEUES_PER_QZONE (sizeof(unsigned long) * 8)
|
||||
#define QED_QUEUE_CID_SELF (0xff)
|
||||
|
||||
|
|
|
@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
|
|||
data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
|
||||
data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
|
||||
data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
|
||||
data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
|
||||
|
||||
data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
|
||||
}
|
||||
|
||||
static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
|
||||
|
@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
|
|||
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
|
||||
|
||||
p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
|
||||
p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
|
||||
p_ramrod->inner_vlan_stripping_en =
|
||||
p_ll2_conn->input.rx_vlan_removal_en;
|
||||
p_ramrod->queue_id = p_ll2_conn->queue_id;
|
||||
p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
|
||||
|
||||
|
@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
|
|||
|
||||
memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
|
||||
|
||||
p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
|
||||
CORE_TX_DEST_NW : CORE_TX_DEST_LB;
|
||||
switch (data->input.tx_dest) {
|
||||
case QED_LL2_TX_DEST_NW:
|
||||
p_ll2_info->tx_dest = CORE_TX_DEST_NW;
|
||||
break;
|
||||
case QED_LL2_TX_DEST_LB:
|
||||
p_ll2_info->tx_dest = CORE_TX_DEST_LB;
|
||||
break;
|
||||
case QED_LL2_TX_DEST_DROP:
|
||||
p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (data->input.conn_type == QED_LL2_TYPE_OOO ||
|
||||
data->input.secondary_queue)
|
||||
p_ll2_info->main_func_queue = false;
|
||||
|
@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
|
|||
goto release_terminate;
|
||||
}
|
||||
|
||||
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
|
||||
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
|
||||
if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
|
||||
DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
|
||||
rc = qed_ll2_start_ooo(cdev, params);
|
||||
if (rc) {
|
||||
|
@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
|
|||
qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
|
||||
eth_zero_addr(cdev->ll2_mac_address);
|
||||
|
||||
if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
|
||||
cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
|
||||
if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
|
||||
qed_ll2_stop_ooo(cdev);
|
||||
|
||||
rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
|
||||
|
|
|
@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
|
|||
DRV_MSG_CODE_NVM_READ_NVRAM,
|
||||
addr + offset +
|
||||
(bytes_to_copy <<
|
||||
DRV_MB_PARAM_NVM_LEN_SHIFT),
|
||||
DRV_MB_PARAM_NVM_LEN_OFFSET),
|
||||
&resp, &resp_param,
|
||||
&read_len,
|
||||
(u32 *)(p_buf + offset));
|
||||
|
|
|
@ -553,7 +553,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
|
|||
|
||||
if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
|
||||
qed_iwarp_init_fw_ramrod(p_hwfn,
|
||||
&p_ent->ramrod.iwarp_init_func.iwarp);
|
||||
&p_ent->ramrod.iwarp_init_func);
|
||||
p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
|
||||
} else {
|
||||
p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
|
||||
|
|
|
@ -124,6 +124,8 @@
|
|||
0x1f0434UL
|
||||
#define PRS_REG_SEARCH_TAG1 \
|
||||
0x1f0444UL
|
||||
#define PRS_REG_SEARCH_TENANT_ID \
|
||||
0x1f044cUL
|
||||
#define PRS_REG_PKT_LEN_STAT_TAGS_NOT_COUNTED_FIRST \
|
||||
0x1f0a0cUL
|
||||
#define PRS_REG_SEARCH_TCP_FIRST_FRAG \
|
||||
|
@ -200,7 +202,13 @@
|
|||
0x2e8800UL
|
||||
#define CCFC_REG_STRONG_ENABLE_VF \
|
||||
0x2e070cUL
|
||||
#define CDU_REG_CID_ADDR_PARAMS \
|
||||
#define CDU_REG_CCFC_CTX_VALID0 \
|
||||
0x580400UL
|
||||
#define CDU_REG_CCFC_CTX_VALID1 \
|
||||
0x580404UL
|
||||
#define CDU_REG_TCFC_CTX_VALID0 \
|
||||
0x580408UL
|
||||
#define CDU_REG_CID_ADDR_PARAMS \
|
||||
0x580900UL
|
||||
#define DBG_REG_CLIENT_ENABLE \
|
||||
0x010004UL
|
||||
|
@ -1277,6 +1285,46 @@
|
|||
0x0543a4UL
|
||||
#define PCIE_REG_DBG_COMMON_FORCE_FRAME_K2_E5 \
|
||||
0x0543a8UL
|
||||
#define PTLD_REG_DBG_SELECT_E5 \
|
||||
0x5a1600UL
|
||||
#define PTLD_REG_DBG_DWORD_ENABLE_E5 \
|
||||
0x5a1604UL
|
||||
#define PTLD_REG_DBG_SHIFT_E5 \
|
||||
0x5a1608UL
|
||||
#define PTLD_REG_DBG_FORCE_VALID_E5 \
|
||||
0x5a160cUL
|
||||
#define PTLD_REG_DBG_FORCE_FRAME_E5 \
|
||||
0x5a1610UL
|
||||
#define YPLD_REG_DBG_SELECT_E5 \
|
||||
0x5c1600UL
|
||||
#define YPLD_REG_DBG_DWORD_ENABLE_E5 \
|
||||
0x5c1604UL
|
||||
#define YPLD_REG_DBG_SHIFT_E5 \
|
||||
0x5c1608UL
|
||||
#define YPLD_REG_DBG_FORCE_VALID_E5 \
|
||||
0x5c160cUL
|
||||
#define YPLD_REG_DBG_FORCE_FRAME_E5 \
|
||||
0x5c1610UL
|
||||
#define RGSRC_REG_DBG_SELECT_E5 \
|
||||
0x320040UL
|
||||
#define RGSRC_REG_DBG_DWORD_ENABLE_E5 \
|
||||
0x320044UL
|
||||
#define RGSRC_REG_DBG_SHIFT_E5 \
|
||||
0x320048UL
|
||||
#define RGSRC_REG_DBG_FORCE_VALID_E5 \
|
||||
0x32004cUL
|
||||
#define RGSRC_REG_DBG_FORCE_FRAME_E5 \
|
||||
0x320050UL
|
||||
#define TGSRC_REG_DBG_SELECT_E5 \
|
||||
0x322040UL
|
||||
#define TGSRC_REG_DBG_DWORD_ENABLE_E5 \
|
||||
0x322044UL
|
||||
#define TGSRC_REG_DBG_SHIFT_E5 \
|
||||
0x322048UL
|
||||
#define TGSRC_REG_DBG_FORCE_VALID_E5 \
|
||||
0x32204cUL
|
||||
#define TGSRC_REG_DBG_FORCE_FRAME_E5 \
|
||||
0x322050UL
|
||||
#define MISC_REG_RESET_PL_UA \
|
||||
0x008050UL
|
||||
#define MISC_REG_RESET_PL_HV \
|
||||
|
@ -1433,6 +1481,8 @@
|
|||
0x340800UL
|
||||
#define BRB_REG_BIG_RAM_DATA \
|
||||
0x341500UL
|
||||
#define BRB_REG_BIG_RAM_DATA_SIZE \
|
||||
64
|
||||
#define SEM_FAST_REG_STALL_0_BB_K2 \
|
||||
0x000488UL
|
||||
#define SEM_FAST_REG_STALLED \
|
||||
|
|
|
@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
|
|||
DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
|
||||
p_ramrod->mf_mode = MF_NPAR;
|
||||
}
|
||||
p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
|
||||
|
||||
p_ramrod->outer_tag_config.outer_tag.tci =
|
||||
cpu_to_le16(p_hwfn->hw_info.ovlan);
|
||||
|
||||
/* Place EQ address in RAMROD */
|
||||
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
|
||||
|
@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
|
|||
p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
|
||||
|
||||
DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
|
||||
"Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
|
||||
sb, sb_index, p_ramrod->outer_tag);
|
||||
"Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
|
||||
sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
|
||||
|
||||
rc = qed_spq_post(p_hwfn, p_ent, NULL);
|
||||
|
||||
|
|
|
@ -153,9 +153,9 @@ static int qed_sp_vf_stop(struct qed_hwfn *p_hwfn,
|
|||
return qed_spq_post(p_hwfn, p_ent, NULL);
|
||||
}
|
||||
|
||||
static bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
|
||||
int rel_vf_id,
|
||||
bool b_enabled_only, bool b_non_malicious)
|
||||
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
|
||||
int rel_vf_id,
|
||||
bool b_enabled_only, bool b_non_malicious)
|
||||
{
|
||||
if (!p_hwfn->pf_iov_info) {
|
||||
DP_NOTICE(p_hwfn->cdev, "No iov info\n");
|
||||
|
@ -4237,6 +4237,7 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
|
|||
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
|
||||
struct qed_ptt *p_ptt, int vfid, int val)
|
||||
{
|
||||
struct qed_mcp_link_state *p_link;
|
||||
struct qed_vf_info *vf;
|
||||
u8 abs_vp_id = 0;
|
||||
int rc;
|
||||
|
@ -4249,7 +4250,10 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
|
|||
if (rc)
|
||||
return rc;
|
||||
|
||||
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
|
||||
p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
|
||||
|
||||
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
|
||||
p_link->speed);
|
||||
}
|
||||
|
||||
static int
|
||||
|
|
|
@ -273,6 +273,23 @@ enum qed_iov_wq_flag {
|
|||
};
|
||||
|
||||
#ifdef CONFIG_QED_SRIOV
|
||||
/**
|
||||
* @brief Check if given VF ID @vfid is valid
|
||||
* w.r.t. @b_enabled_only value
|
||||
* if b_enabled_only = true - only enabled VF id is valid
|
||||
* else any VF id less than max_vfs is valid
|
||||
*
|
||||
* @param p_hwfn
|
||||
* @param rel_vf_id - Relative VF ID
|
||||
* @param b_enabled_only - consider only enabled VF
|
||||
* @param b_non_malicious - true iff we want to validate vf isn't malicious.
|
||||
*
|
||||
* @return bool - true for valid VF ID
|
||||
*/
|
||||
bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
|
||||
int rel_vf_id,
|
||||
bool b_enabled_only, bool b_non_malicious);
|
||||
|
||||
/**
|
||||
* @brief - Given a VF index, return index of next [including that] active VF.
|
||||
*
|
||||
|
@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev);
|
|||
int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
|
||||
void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
|
||||
#else
|
||||
static inline bool
|
||||
qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
|
||||
int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
|
||||
u16 rel_vf_id)
|
||||
{
|
||||
|
|
|
@ -98,10 +98,18 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
|
|||
u16 rxq_id, bool add_fltr)
|
||||
{
|
||||
const struct qed_eth_ops *op = edev->ops;
|
||||
struct qed_ntuple_filter_params params;
|
||||
|
||||
if (n->used)
|
||||
return;
|
||||
|
||||
memset(¶ms, 0, sizeof(params));
|
||||
|
||||
params.addr = n->mapping;
|
||||
params.length = n->buf_len;
|
||||
params.qid = rxq_id;
|
||||
params.b_is_add = add_fltr;
|
||||
|
||||
DP_VERBOSE(edev, NETIF_MSG_RX_STATUS,
|
||||
"%s arfs filter flow_id=%d, sw_id=%d, src_port=%d, dst_port=%d, rxq=%d\n",
|
||||
add_fltr ? "Adding" : "Deleting",
|
||||
|
@ -110,8 +118,7 @@ static void qede_configure_arfs_fltr(struct qede_dev *edev,
|
|||
|
||||
n->used = true;
|
||||
n->filter_op = add_fltr;
|
||||
op->ntuple_filter_config(edev->cdev, n, n->mapping, n->buf_len, 0,
|
||||
rxq_id, add_fltr);
|
||||
op->ntuple_filter_config(edev->cdev, n, ¶ms);
|
||||
}
|
||||
|
||||
static void
|
||||
|
@ -141,7 +148,10 @@ qede_enqueue_fltr_and_config_searcher(struct qede_dev *edev,
|
|||
edev->arfs->filter_count++;
|
||||
|
||||
if (edev->arfs->filter_count == 1 && !edev->arfs->enable) {
|
||||
edev->ops->configure_arfs_searcher(edev->cdev, true);
|
||||
enum qed_filter_config_mode mode;
|
||||
|
||||
mode = QED_FILTER_CONFIG_MODE_5_TUPLE;
|
||||
edev->ops->configure_arfs_searcher(edev->cdev, mode);
|
||||
edev->arfs->enable = true;
|
||||
}
|
||||
|
||||
|
@ -160,8 +170,11 @@ qede_dequeue_fltr_and_config_searcher(struct qede_dev *edev,
|
|||
edev->arfs->filter_count--;
|
||||
|
||||
if (!edev->arfs->filter_count && edev->arfs->enable) {
|
||||
enum qed_filter_config_mode mode;
|
||||
|
||||
mode = QED_FILTER_CONFIG_MODE_DISABLE;
|
||||
edev->arfs->enable = false;
|
||||
edev->ops->configure_arfs_searcher(edev->cdev, false);
|
||||
edev->ops->configure_arfs_searcher(edev->cdev, mode);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -255,8 +268,11 @@ void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr)
|
|||
|
||||
if (!edev->arfs->filter_count) {
|
||||
if (edev->arfs->enable) {
|
||||
enum qed_filter_config_mode mode;
|
||||
|
||||
mode = QED_FILTER_CONFIG_MODE_DISABLE;
|
||||
edev->arfs->enable = false;
|
||||
edev->ops->configure_arfs_searcher(edev->cdev, false);
|
||||
edev->ops->configure_arfs_searcher(edev->cdev, mode);
|
||||
}
|
||||
#ifdef CONFIG_RFS_ACCEL
|
||||
} else {
|
||||
|
|
|
@ -26,6 +26,7 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
|
|||
u8 fcp_cmd_payload[32])
|
||||
{
|
||||
struct e4_fcoe_task_context *ctx = task_params->context;
|
||||
const u8 val_byte = ctx->ystorm_ag_context.byte0;
|
||||
struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
|
||||
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
|
||||
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
|
||||
|
@ -34,6 +35,7 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
|
|||
bool slow_sgl;
|
||||
|
||||
memset(ctx, 0, sizeof(*(ctx)));
|
||||
ctx->ystorm_ag_context.byte0 = val_byte;
|
||||
slow_sgl = scsi_is_slow_sgl(sgl_task_params->num_sges,
|
||||
sgl_task_params->small_mid_sge);
|
||||
io_size = (task_params->task_type == FCOE_TASK_TYPE_WRITE_INITIATOR ?
|
||||
|
@ -43,20 +45,20 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
|
|||
y_st_ctx = &ctx->ystorm_st_context;
|
||||
y_st_ctx->data_2_trns_rem = cpu_to_le32(io_size);
|
||||
y_st_ctx->task_rety_identifier = cpu_to_le32(task_retry_id);
|
||||
y_st_ctx->task_type = task_params->task_type;
|
||||
y_st_ctx->task_type = (u8)task_params->task_type;
|
||||
memcpy(&y_st_ctx->tx_info_union.fcp_cmd_payload,
|
||||
fcp_cmd_payload, sizeof(struct fcoe_fcp_cmd_payload));
|
||||
|
||||
/* Tstorm ctx */
|
||||
t_st_ctx = &ctx->tstorm_st_context;
|
||||
t_st_ctx->read_only.dev_type = (task_params->is_tape_device == 1 ?
|
||||
FCOE_TASK_DEV_TYPE_TAPE :
|
||||
FCOE_TASK_DEV_TYPE_DISK);
|
||||
t_st_ctx->read_only.dev_type = (u8)(task_params->is_tape_device == 1 ?
|
||||
FCOE_TASK_DEV_TYPE_TAPE :
|
||||
FCOE_TASK_DEV_TYPE_DISK);
|
||||
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
|
||||
val = cpu_to_le32(task_params->cq_rss_number);
|
||||
t_st_ctx->read_only.glbl_q_num = val;
|
||||
t_st_ctx->read_only.fcp_cmd_trns_size = cpu_to_le32(io_size);
|
||||
t_st_ctx->read_only.task_type = task_params->task_type;
|
||||
t_st_ctx->read_only.task_type = (u8)task_params->task_type;
|
||||
SET_FIELD(t_st_ctx->read_write.flags,
|
||||
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
|
||||
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
|
||||
|
@ -88,6 +90,8 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
|
|||
SET_FIELD(m_st_ctx->flags,
|
||||
MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE,
|
||||
(slow_sgl ? SCSI_TX_SLOW_SGL : SCSI_FAST_SGL));
|
||||
m_st_ctx->sgl_params.sgl_num_sges =
|
||||
cpu_to_le16(sgl_task_params->num_sges);
|
||||
} else {
|
||||
/* Tstorm ctx */
|
||||
SET_FIELD(t_st_ctx->read_write.flags,
|
||||
|
@ -101,7 +105,9 @@ int init_initiator_rw_fcoe_task(struct fcoe_task_params *task_params,
|
|||
sgl_task_params);
|
||||
}
|
||||
|
||||
/* Init Sqe */
|
||||
init_common_sqe(task_params, SEND_FCOE_CMD);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -113,6 +119,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
|
|||
u8 fw_to_place_fc_header)
|
||||
{
|
||||
struct e4_fcoe_task_context *ctx = task_params->context;
|
||||
const u8 val_byte = ctx->ystorm_ag_context.byte0;
|
||||
struct e4_ustorm_fcoe_task_ag_ctx *u_ag_ctx;
|
||||
struct ystorm_fcoe_task_st_ctx *y_st_ctx;
|
||||
struct tstorm_fcoe_task_st_ctx *t_st_ctx;
|
||||
|
@ -120,6 +127,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
|
|||
u32 val;
|
||||
|
||||
memset(ctx, 0, sizeof(*(ctx)));
|
||||
ctx->ystorm_ag_context.byte0 = val_byte;
|
||||
|
||||
/* Init Ystorm */
|
||||
y_st_ctx = &ctx->ystorm_st_context;
|
||||
|
@ -129,7 +137,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
|
|||
SET_FIELD(y_st_ctx->sgl_mode,
|
||||
YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE, SCSI_FAST_SGL);
|
||||
y_st_ctx->data_2_trns_rem = cpu_to_le32(task_params->tx_io_size);
|
||||
y_st_ctx->task_type = task_params->task_type;
|
||||
y_st_ctx->task_type = (u8)task_params->task_type;
|
||||
memcpy(&y_st_ctx->tx_info_union.tx_params.mid_path,
|
||||
mid_path_fc_header, sizeof(struct fcoe_tx_mid_path_params));
|
||||
|
||||
|
@ -148,7 +156,7 @@ int init_initiator_midpath_unsolicited_fcoe_task(
|
|||
t_st_ctx->read_only.cid = cpu_to_le32(task_params->conn_cid);
|
||||
val = cpu_to_le32(task_params->cq_rss_number);
|
||||
t_st_ctx->read_only.glbl_q_num = val;
|
||||
t_st_ctx->read_only.task_type = task_params->task_type;
|
||||
t_st_ctx->read_only.task_type = (u8)task_params->task_type;
|
||||
SET_FIELD(t_st_ctx->read_write.flags,
|
||||
FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME, 1);
|
||||
t_st_ctx->read_write.rx_id = cpu_to_le16(FCOE_RX_ID);
|
||||
|
@ -182,9 +190,10 @@ int init_initiator_cleanup_fcoe_task(struct fcoe_task_params *task_params)
|
|||
}
|
||||
|
||||
int init_initiator_sequence_recovery_fcoe_task(
|
||||
struct fcoe_task_params *task_params, u32 off)
|
||||
struct fcoe_task_params *task_params, u32 desired_offset)
|
||||
{
|
||||
init_common_sqe(task_params, FCOE_SEQUENCE_RECOVERY);
|
||||
task_params->sqe->additional_info_union.seq_rec_updated_offset = off;
|
||||
task_params->sqe->additional_info_union.seq_rec_updated_offset =
|
||||
desired_offset;
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -225,19 +225,6 @@ enum fcoe_cqe_type {
|
|||
MAX_FCOE_CQE_TYPE
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* FCoE device type
|
||||
*/
|
||||
enum fcoe_device_type {
|
||||
FCOE_TASK_DEV_TYPE_DISK,
|
||||
FCOE_TASK_DEV_TYPE_TAPE,
|
||||
MAX_FCOE_DEVICE_TYPE
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
/*
|
||||
* FCoE fast path error codes
|
||||
*/
|
||||
|
@ -332,31 +319,6 @@ enum fcoe_sp_error_code {
|
|||
MAX_FCOE_SP_ERROR_CODE
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* FCoE SQE request type
|
||||
*/
|
||||
enum fcoe_sqe_request_type {
|
||||
SEND_FCOE_CMD,
|
||||
SEND_FCOE_MIDPATH,
|
||||
SEND_FCOE_ABTS_REQUEST,
|
||||
FCOE_EXCHANGE_CLEANUP,
|
||||
FCOE_SEQUENCE_RECOVERY,
|
||||
SEND_FCOE_XFER_RDY,
|
||||
SEND_FCOE_RSP,
|
||||
SEND_FCOE_RSP_WITH_SENSE_DATA,
|
||||
SEND_FCOE_TARGET_DATA,
|
||||
SEND_FCOE_INITIATOR_DATA,
|
||||
/*
|
||||
* Xfer Continuation (==1) ready to be sent. Previous XFERs data
|
||||
* received successfully.
|
||||
*/
|
||||
SEND_FCOE_XFER_CONTINUATION_RDY,
|
||||
SEND_FCOE_TARGET_ABTS_RSP,
|
||||
MAX_FCOE_SQE_REQUEST_TYPE
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* FCoE task TX state
|
||||
*/
|
||||
|
@ -389,34 +351,4 @@ enum fcoe_task_tx_state {
|
|||
MAX_FCOE_TASK_TX_STATE
|
||||
};
|
||||
|
||||
|
||||
/*
|
||||
* FCoE task type
|
||||
*/
|
||||
enum fcoe_task_type {
|
||||
FCOE_TASK_TYPE_WRITE_INITIATOR,
|
||||
FCOE_TASK_TYPE_READ_INITIATOR,
|
||||
FCOE_TASK_TYPE_MIDPATH,
|
||||
FCOE_TASK_TYPE_UNSOLICITED,
|
||||
FCOE_TASK_TYPE_ABTS,
|
||||
FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
|
||||
FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
|
||||
FCOE_TASK_TYPE_WRITE_TARGET,
|
||||
FCOE_TASK_TYPE_READ_TARGET,
|
||||
FCOE_TASK_TYPE_RSP,
|
||||
FCOE_TASK_TYPE_RSP_SENSE_DATA,
|
||||
FCOE_TASK_TYPE_ABTS_TARGET,
|
||||
FCOE_TASK_TYPE_ENUM_SIZE,
|
||||
MAX_FCOE_TASK_TYPE
|
||||
};
|
||||
|
||||
struct scsi_glbl_queue_entry {
|
||||
/* Start physical address for the RQ (receive queue) PBL. */
|
||||
struct regpair rq_pbl_addr;
|
||||
/* Start physical address for the CQ (completion queue) PBL. */
|
||||
struct regpair cq_pbl_addr;
|
||||
/* Start physical address for the CMDQ (command queue) PBL. */
|
||||
struct regpair cmdq_pbl_addr;
|
||||
};
|
||||
|
||||
#endif /* __QEDF_HSI__ */
|
||||
|
|
|
@ -2005,17 +2005,18 @@ void qedf_process_unsol_compl(struct qedf_ctx *qedf, uint16_t que_idx,
|
|||
struct qedf_io_work *io_work;
|
||||
u32 bdq_idx;
|
||||
void *bdq_addr;
|
||||
struct scsi_bd *p_bd_info;
|
||||
|
||||
p_bd_info = &cqe->cqe_info.unsolic_info.bd_info;
|
||||
QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_UNSOL,
|
||||
"address.hi=%x address.lo=%x opaque_data.hi=%x "
|
||||
"opaque_data.lo=%x bdq_prod_idx=%u len=%u.\n",
|
||||
le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.hi),
|
||||
le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.address.lo),
|
||||
le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.hi),
|
||||
le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo),
|
||||
qedf->bdq_prod_idx, pktlen);
|
||||
"address.hi=%x, address.lo=%x, opaque_data.hi=%x, opaque_data.lo=%x, bdq_prod_idx=%u, len=%u\n",
|
||||
le32_to_cpu(p_bd_info->address.hi),
|
||||
le32_to_cpu(p_bd_info->address.lo),
|
||||
le32_to_cpu(p_bd_info->opaque.fcoe_opaque.hi),
|
||||
le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo),
|
||||
qedf->bdq_prod_idx, pktlen);
|
||||
|
||||
bdq_idx = le32_to_cpu(cqe->cqe_info.unsolic_info.bd_info.opaque.lo);
|
||||
bdq_idx = le32_to_cpu(p_bd_info->opaque.fcoe_opaque.lo);
|
||||
if (bdq_idx >= QEDF_BDQ_SIZE) {
|
||||
QEDF_ERR(&(qedf->dbg_ctx), "bdq_idx is out of range %d.\n",
|
||||
bdq_idx);
|
||||
|
|
|
@ -2623,9 +2623,9 @@ static int qedf_alloc_bdq(struct qedf_ctx *qedf)
|
|||
for (i = 0; i < QEDF_BDQ_SIZE; i++) {
|
||||
pbl->address.hi = cpu_to_le32(U64_HI(qedf->bdq[i].buf_dma));
|
||||
pbl->address.lo = cpu_to_le32(U64_LO(qedf->bdq[i].buf_dma));
|
||||
pbl->opaque.hi = 0;
|
||||
pbl->opaque.fcoe_opaque.hi = 0;
|
||||
/* Opaque lo data is an index into the BDQ array */
|
||||
pbl->opaque.lo = cpu_to_le32(i);
|
||||
pbl->opaque.fcoe_opaque.lo = cpu_to_le32(i);
|
||||
pbl++;
|
||||
}
|
||||
|
||||
|
|
|
@ -326,7 +326,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
|
|||
(qedi->bdq_prod_idx % qedi->rq_num_entries));
|
||||
|
||||
/* Obtain buffer address from rqe_opaque */
|
||||
idx = cqe->rqe_opaque.lo;
|
||||
idx = cqe->rqe_opaque;
|
||||
if (idx > (QEDI_BDQ_NUM - 1)) {
|
||||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
|
||||
|
@ -335,8 +335,7 @@ static void qedi_get_rq_bdq_buf(struct qedi_ctx *qedi,
|
|||
}
|
||||
|
||||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"rqe_opaque.lo [0x%p], rqe_opaque.hi [0x%p], idx [%d]\n",
|
||||
cqe->rqe_opaque.lo, cqe->rqe_opaque.hi, idx);
|
||||
"rqe_opaque [0x%p], idx [%d]\n", cqe->rqe_opaque, idx);
|
||||
|
||||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"unsol_cqe_type = %d\n", cqe->unsol_cqe_type);
|
||||
|
@ -363,7 +362,7 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
|
|||
struct scsi_bd *pbl;
|
||||
|
||||
/* Obtain buffer address from rqe_opaque */
|
||||
idx = cqe->rqe_opaque.lo;
|
||||
idx = cqe->rqe_opaque;
|
||||
if (idx > (QEDI_BDQ_NUM - 1)) {
|
||||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"wrong idx %d returned by FW, dropping the unsolicited pkt\n",
|
||||
|
@ -378,8 +377,10 @@ static void qedi_put_rq_bdq_buf(struct qedi_ctx *qedi,
|
|||
QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
|
||||
"pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx] idx [%d]\n",
|
||||
pbl, pbl->address.hi, pbl->address.lo, idx);
|
||||
pbl->opaque.hi = 0;
|
||||
pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(idx));
|
||||
pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
|
||||
pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
|
||||
pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
|
||||
pbl->opaque.iscsi_opaque.opaque = cpu_to_le32(idx);
|
||||
|
||||
/* Increment producer to let f/w know we've handled the frame */
|
||||
qedi->bdq_prod_idx += count;
|
||||
|
|
|
@ -204,11 +204,14 @@ static void init_default_iscsi_task(struct iscsi_task_params *task_params,
|
|||
enum iscsi_task_type task_type)
|
||||
{
|
||||
struct e4_iscsi_task_context *context;
|
||||
u16 index;
|
||||
u32 val;
|
||||
u16 index;
|
||||
u8 val_byte;
|
||||
|
||||
context = task_params->context;
|
||||
val_byte = context->mstorm_ag_context.cdu_validation;
|
||||
memset(context, 0, sizeof(*context));
|
||||
context->mstorm_ag_context.cdu_validation = val_byte;
|
||||
|
||||
for (index = 0; index <
|
||||
ARRAY_SIZE(context->ystorm_st_context.pdu_hdr.data.data);
|
||||
|
@ -498,19 +501,33 @@ static int init_rw_iscsi_task(struct iscsi_task_params *task_params,
|
|||
|
||||
cxt = task_params->context;
|
||||
|
||||
val = cpu_to_le32(task_size);
|
||||
cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length = val;
|
||||
init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
|
||||
cmd_params);
|
||||
val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
|
||||
cxt->mstorm_st_context.sense_db.lo = val;
|
||||
|
||||
val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
|
||||
cxt->mstorm_st_context.sense_db.hi = val;
|
||||
if (task_type == ISCSI_TASK_TYPE_TARGET_READ) {
|
||||
set_local_completion_context(cxt);
|
||||
} else if (task_type == ISCSI_TASK_TYPE_TARGET_WRITE) {
|
||||
val = cpu_to_le32(task_size +
|
||||
((struct iscsi_r2t_hdr *)pdu_header)->buffer_offset);
|
||||
cxt->ystorm_st_context.pdu_hdr.r2t.desired_data_trns_len = val;
|
||||
cxt->mstorm_st_context.expected_itt =
|
||||
cpu_to_le32(pdu_header->itt);
|
||||
} else {
|
||||
val = cpu_to_le32(task_size);
|
||||
cxt->ystorm_st_context.pdu_hdr.cmd.expected_transfer_length =
|
||||
val;
|
||||
init_initiator_rw_cdb_ystorm_context(&cxt->ystorm_st_context,
|
||||
cmd_params);
|
||||
val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.lo);
|
||||
cxt->mstorm_st_context.sense_db.lo = val;
|
||||
|
||||
val = cpu_to_le32(cmd_params->sense_data_buffer_phys_addr.hi);
|
||||
cxt->mstorm_st_context.sense_db.hi = val;
|
||||
}
|
||||
|
||||
if (task_params->tx_io_size) {
|
||||
init_dif_context_flags(&cxt->ystorm_st_context.state.dif_flags,
|
||||
dif_task_params);
|
||||
init_dif_context_flags(&cxt->ustorm_st_context.dif_flags,
|
||||
dif_task_params);
|
||||
init_scsi_sgl_context(&cxt->ystorm_st_context.state.sgl_params,
|
||||
&cxt->ystorm_st_context.state.data_desc,
|
||||
sgl_task_params);
|
||||
|
|
|
@@ -52,11 +52,12 @@ void qedi_update_itt_map(struct qedi_ctx *qedi, u32 tid, u32 proto_itt,
void qedi_get_proto_itt(struct qedi_ctx *qedi, u32 tid, u32 *proto_itt);
void qedi_get_task_tid(struct qedi_ctx *qedi, u32 itt, int16_t *tid);
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
			      struct async_data *data);
			      struct iscsi_eqe_data *data);
void qedi_start_conn_recovery(struct qedi_ctx *qedi,
			      struct qedi_conn *qedi_conn);
struct qedi_conn *qedi_get_conn_from_id(struct qedi_ctx *qedi, u32 iscsi_cid);
void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data);
void qedi_process_tcp_error(struct qedi_endpoint *ep,
			    struct iscsi_eqe_data *data);
void qedi_mark_device_missing(struct iscsi_cls_session *cls_session);
void qedi_mark_device_available(struct iscsi_cls_session *cls_session);
void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu);

@@ -539,7 +539,6 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
	conn_info->ka_max_probe_cnt = DEF_KA_MAX_PROBE_COUNT;
	conn_info->dup_ack_theshold = 3;
	conn_info->rcv_wnd = 65535;
	conn_info->cwnd = DEF_MAX_CWND;

	conn_info->ss_thresh = 65535;
	conn_info->srtt = 300;

@@ -557,8 +556,8 @@ static int qedi_iscsi_offload_conn(struct qedi_endpoint *qedi_ep)
			  (qedi_ep->ip_type == TCP_IPV6),
			  1, (qedi_ep->vlan_id != 0));

	conn_info->cwnd = DEF_MAX_CWND * conn_info->mss;
	conn_info->rcv_wnd_scale = 4;
	conn_info->ts_ticks_per_second = 1000;
	conn_info->da_timeout_value = 200;
	conn_info->ack_frequency = 2;

@@ -1557,7 +1556,8 @@ char *qedi_get_iscsi_error(enum iscsi_error_types err_code)
	return msg;
}

void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
void qedi_process_iscsi_error(struct qedi_endpoint *ep,
			      struct iscsi_eqe_data *data)
{
	struct qedi_conn *qedi_conn;
	struct qedi_ctx *qedi;

@@ -1603,7 +1603,8 @@ void qedi_process_iscsi_error(struct qedi_endpoint *ep, struct async_data *data)
	qedi_start_conn_recovery(qedi_conn->qedi, qedi_conn);
}

void qedi_process_tcp_error(struct qedi_endpoint *ep, struct async_data *data)
void qedi_process_tcp_error(struct qedi_endpoint *ep,
			    struct iscsi_eqe_data *data)
{
	struct qedi_conn *qedi_conn;

@@ -60,7 +60,7 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
{
	struct qedi_ctx *qedi;
	struct qedi_endpoint *qedi_ep;
	struct async_data *data;
	struct iscsi_eqe_data *data;
	int rval = 0;

	if (!context || !fw_handle) {

@@ -72,18 +72,18 @@ static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "Recv Event %d fw_handle %p\n", fw_event_code, fw_handle);

	data = (struct async_data *)fw_handle;
	data = (struct iscsi_eqe_data *)fw_handle;
	QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
		  "cid=0x%x tid=0x%x err-code=0x%x fw-dbg-param=0x%x\n",
		  data->cid, data->itid, data->error_code,
		  data->fw_debug_param);
		  "icid=0x%x conn_id=0x%x err-code=0x%x error-pdu-opcode-reserved=0x%x\n",
		  data->icid, data->conn_id, data->error_code,
		  data->error_pdu_opcode_reserved);

	qedi_ep = qedi->ep_tbl[data->cid];
	qedi_ep = qedi->ep_tbl[data->icid];

	if (!qedi_ep) {
		QEDI_WARN(&qedi->dbg_ctx,
			  "Cannot process event, ep already disconnected, cid=0x%x\n",
			  data->cid);
			  data->icid);
		WARN_ON(1);
		return -ENODEV;
	}

@@ -858,7 +858,6 @@ static int qedi_set_iscsi_pf_param(struct qedi_ctx *qedi)

	qedi->pf_params.iscsi_pf_params.gl_rq_pi = QEDI_PROTO_CQ_PROD_IDX;
	qedi->pf_params.iscsi_pf_params.gl_cmd_pi = 1;
	qedi->pf_params.iscsi_pf_params.ooo_enable = 1;

err_alloc_mem:
	return rval;

@@ -1262,8 +1261,10 @@ static int qedi_alloc_bdq(struct qedi_ctx *qedi)
		QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_CONN,
			  "pbl [0x%p] pbl->address hi [0x%llx] lo [0x%llx], idx [%d]\n",
			  pbl, pbl->address.hi, pbl->address.lo, i);
		pbl->opaque.hi = 0;
		pbl->opaque.lo = cpu_to_le32(QEDI_U64_LO(i));
		pbl->opaque.iscsi_opaque.reserved_zero[0] = 0;
		pbl->opaque.iscsi_opaque.reserved_zero[1] = 0;
		pbl->opaque.iscsi_opaque.reserved_zero[2] = 0;
		pbl->opaque.iscsi_opaque.opaque = cpu_to_le16(i);
		pbl++;
	}

@@ -109,8 +109,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48

#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 20
#define FW_REVISION_VERSION 0
#define FW_MINOR_VERSION 33
#define FW_REVISION_VERSION 1
#define FW_ENGINEERING_VERSION 0

/***********************/

@@ -148,17 +148,10 @@
/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */
#define NUM_PHYS_TCS_4PORT_K2 (4)
#define NUM_OF_PHYS_TCS (8)

#define PURE_LB_TC NUM_OF_PHYS_TCS
#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1)
#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1)

#define LB_TC (NUM_OF_PHYS_TCS)

#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2)
#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB)
#define MAX_NUM_VOQS_E4 (MAX_NUM_VOQS_K2)
#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB)

/* CIDs */
#define NUM_OF_CONNECTION_TYPES_E4 (8)
#define NUM_OF_LCIDS (320)

@@ -602,6 +595,11 @@
#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000
#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000

#define PXP_VF_BAR0_START_IGU2 0x10000
#define PXP_VF_BAR0_IGU2_LENGTH 0xD000
#define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \
			      PXP_VF_BAR0_IGU2_LENGTH - 1)

#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32

#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12

@@ -662,14 +660,6 @@

#define PRS_GFT_CAM_LINES_NO_MATCH 31

/* Async data KCQ CQE */
struct async_data {
	__le32 cid;
	__le16 itid;
	u8 error_code;
	u8 fw_debug_param;
};

/* Interrupt coalescing TimeSet */
struct coalescing_timeset {
	u8 value;

@@ -690,9 +680,26 @@ struct eth_rx_prod_data {
	__le16 cqe_prod;
};

struct iscsi_eqe_data {
	__le32 cid;
struct tcp_ulp_connect_done_params {
	__le16 mss;
	u8 snd_wnd_scale;
	u8 flags;
#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1
#define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0
#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F
#define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1
};

struct iscsi_connect_done_results {
	__le16 icid;
	__le16 conn_id;
	struct tcp_ulp_connect_done_params params;
};

struct iscsi_eqe_data {
	__le16 icid;
	__le16 conn_id;
	__le16 reserved;
	u8 error_code;
	u8 error_pdu_opcode_reserved;
#define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F

@@ -756,7 +763,7 @@ struct ustorm_queue_zone {

/* Status block structure */
struct cau_pi_entry {
	u32 prod;
	__le32 prod;
#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF
#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0
#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F

@@ -769,14 +776,14 @@ struct cau_pi_entry {

/* Status block structure */
struct cau_sb_entry {
	u32 data;
	__le32 data;
#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF
#define CAU_SB_ENTRY_SB_PROD_SHIFT 0
#define CAU_SB_ENTRY_STATE0_MASK 0xF
#define CAU_SB_ENTRY_STATE0_SHIFT 24
#define CAU_SB_ENTRY_STATE1_MASK 0xF
#define CAU_SB_ENTRY_STATE1_SHIFT 28
	u32 params;
	__le32 params;
#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F
#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0
#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F

@@ -954,7 +961,7 @@ enum igu_int_cmd {

/* IGU producer or consumer update command */
struct igu_prod_cons_update {
	u32 sb_id_and_flags;
	__le32 sb_id_and_flags;
#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF
#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0
#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1

@@ -969,7 +976,7 @@ struct igu_prod_cons_update {
#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1
#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31
	u32 reserved1;
	__le32 reserved1;
};

/* Igu segments access for default status block only */

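Several status-block and IGU structures in the hunks above change bare u32 members to __le32, marking them as little-endian as seen by the firmware, so host code is expected to go through byte-order conversion helpers rather than plain assignment. Below is a minimal user-space sketch of that idea only, not driver code: the struct name is a hypothetical stand-in, and htole32()/le32toh() from <endian.h> stand in for the kernel's cpu_to_le32()/le32_to_cpu().

/* Illustrative sketch only (user space, glibc/BSD <endian.h>); the kernel
 * driver would use cpu_to_le32()/le32_to_cpu() on the real __le32 fields.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct cau_pi_entry_example {	/* hypothetical stand-in, not the HSI struct */
	uint32_t prod;		/* stored little-endian, like the __le32 prod */
};

int main(void)
{
	struct cau_pi_entry_example e;
	uint32_t host_val = 0x12345678;

	e.prod = htole32(host_val);		/* cpu_to_le32() in the kernel */
	printf("read back: 0x%x\n", le32toh(e.prod));	/* le32_to_cpu() */
	return 0;
}
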
@@ -979,6 +986,30 @@ enum igu_seg_access {
	MAX_IGU_SEG_ACCESS
};

/* Enumeration for L3 type field of parsing_and_err_flags.
 * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6
 * (This field can be filled according to the last-ethertype)
 */
enum l3_type {
	e_l3_type_unknown,
	e_l3_type_ipv4,
	e_l3_type_ipv6,
	MAX_L3_TYPE
};

/* Enumeration for l4Protocol field of parsing_and_err_flags.
 * L4-protocol: 0 - none, 1 - TCP, 2 - UDP.
 * If the packet is IPv4 fragment, and its not the first fragment, the
 * protocol-type should be set to none.
 */
enum l4_protocol {
	e_l4_protocol_none,
	e_l4_protocol_tcp,
	e_l4_protocol_udp,
	MAX_L4_PROTOCOL
};

/* Parsing and error flags field */
struct parsing_and_err_flags {
	__le16 flags;
#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3

@@ -104,6 +104,27 @@
/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4

/* GFS constants */
#define ETH_GFT_TRASH_CAN_VPORT 0x1FF

/* Destination port mode */
enum dest_port_mode {
	DEST_PORT_PHY,
	DEST_PORT_LOOPBACK,
	DEST_PORT_PHY_LOOPBACK,
	DEST_PORT_DROP,
	MAX_DEST_PORT_MODE
};

/* Ethernet address type */
enum eth_addr_type {
	BROADCAST_ADDRESS,
	MULTICAST_ADDRESS,
	UNICAST_ADDRESS,
	UNKNOWN_ADDRESS,
	MAX_ETH_ADDR_TYPE
};

struct eth_tx_1st_bd_flags {
	u8 bitfields;
#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1

@@ -176,10 +197,6 @@ struct eth_edpm_fw_data {
	__le32 reserved;
};

struct eth_fast_path_cqe_fw_debug {
	__le16 reserved2;
};

/* Tunneling parsing flags */
struct eth_tunnel_parsing_flags {
	u8 flags;

@@ -226,9 +243,9 @@ struct eth_fast_path_rx_reg_cqe {
	u8 placement_offset;
	struct eth_tunnel_parsing_flags tunnel_pars_flags;
	u8 bd_num;
	u8 reserved[9];
	struct eth_fast_path_cqe_fw_debug fw_debug;
	u8 reserved1[3];
	u8 reserved;
	__le16 flow_id;
	u8 reserved1[11];
	struct eth_pmd_flow_flags pmd_flags;
};

@@ -280,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe {
	u8 tpa_agg_index;
	u8 header_len;
	__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
	struct eth_fast_path_cqe_fw_debug fw_debug;
	__le16 flow_id;
	u8 reserved;
	struct eth_pmd_flow_flags pmd_flags;
};

@@ -580,10 +580,12 @@ struct fcoe_conn_offload_ramrod_data {
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1
#define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7
	__le16 conn_id;
	u8 def_q_idx;
	u8 reserved[5];

@@ -594,6 +596,13 @@ struct fcoe_conn_terminate_ramrod_data {
	struct regpair terminate_params_addr;
};

/* FCoE device type */
enum fcoe_device_type {
	FCOE_TASK_DEV_TYPE_DISK,
	FCOE_TASK_DEV_TYPE_TAPE,
	MAX_FCOE_DEVICE_TYPE
};

/* Data sgl */
struct fcoe_fast_sgl_ctx {
	struct regpair sgl_start_addr;

@@ -608,7 +617,7 @@ struct fcoe_init_func_ramrod_data {
	struct scsi_init_func_queues q_params;
	__le16 mtu;
	__le16 sq_num_pages_in_pbl;
	__le32 reserved;
	__le32 reserved[3];
};

/* FCoE: Mode of the connection: Target or Initiator or both */

@@ -633,11 +642,46 @@ struct fcoe_rx_stat {
	__le32 rsrv;
};

/* FCoE SQE request type */
enum fcoe_sqe_request_type {
	SEND_FCOE_CMD,
	SEND_FCOE_MIDPATH,
	SEND_FCOE_ABTS_REQUEST,
	FCOE_EXCHANGE_CLEANUP,
	FCOE_SEQUENCE_RECOVERY,
	SEND_FCOE_XFER_RDY,
	SEND_FCOE_RSP,
	SEND_FCOE_RSP_WITH_SENSE_DATA,
	SEND_FCOE_TARGET_DATA,
	SEND_FCOE_INITIATOR_DATA,
	SEND_FCOE_XFER_CONTINUATION_RDY,
	SEND_FCOE_TARGET_ABTS_RSP,
	MAX_FCOE_SQE_REQUEST_TYPE
};

/* FCoe statistics request */
struct fcoe_stat_ramrod_data {
	struct regpair stat_params_addr;
};

/* FCoE task type */
enum fcoe_task_type {
	FCOE_TASK_TYPE_WRITE_INITIATOR,
	FCOE_TASK_TYPE_READ_INITIATOR,
	FCOE_TASK_TYPE_MIDPATH,
	FCOE_TASK_TYPE_UNSOLICITED,
	FCOE_TASK_TYPE_ABTS,
	FCOE_TASK_TYPE_EXCHANGE_CLEANUP,
	FCOE_TASK_TYPE_SEQUENCE_CLEANUP,
	FCOE_TASK_TYPE_WRITE_TARGET,
	FCOE_TASK_TYPE_READ_TARGET,
	FCOE_TASK_TYPE_RSP,
	FCOE_TASK_TYPE_RSP_SENSE_DATA,
	FCOE_TASK_TYPE_ABTS_TARGET,
	FCOE_TASK_TYPE_ENUM_SIZE,
	MAX_FCOE_TASK_TYPE
};

/* Per PF FCoE transmit path statistics - pStorm RAM structure */
struct fcoe_tx_stat {
	struct regpair fcoe_tx_byte_cnt;

@@ -106,6 +106,12 @@
#define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10)
#define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20)

/* Union of data bd_opaque/ tq_tid */
union bd_opaque_tq_union {
	__le16 bd_opaque;
	__le16 tq_tid;
};

/* ISCSI SGL entry */
struct cqe_error_bitmap {
	u8 cqe_error_status_bits;

@@ -133,6 +139,65 @@ struct data_hdr {
	__le32 data[12];
};

struct lun_mapper_addr_reserved {
	struct regpair lun_mapper_addr;
	u8 reserved0[8];
};

/* rdif context for dif on immediate */
struct dif_on_immediate_params {
	__le32 initial_ref_tag;
	__le16 application_tag;
	__le16 application_tag_mask;
	__le16 flags1;
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5
#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6
#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7
#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3
#define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8
#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF
#define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15
	u8 flags0;
#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0
#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1
#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2
#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3
#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3
#define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4
#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6
#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1
#define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7
	u8 reserved_zero[5];
};

/* iSCSI dif on immediate mode attributes union */
union dif_configuration_params {
	struct lun_mapper_addr_reserved lun_mapper_address;
	struct dif_on_immediate_params def_dif_conf;
};

/* Union of data/r2t sequence number */
union iscsi_seq_num {
	__le16 data_sn;

@@ -163,8 +228,10 @@ struct ystorm_iscsi_task_state {
#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1
#define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F
#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3
};

/* The iscsi storm task context of Ystorm */

@@ -846,7 +913,7 @@ struct mstorm_iscsi_task_st_ctx {
	__le32 data_buffer_offset;
	u8 task_type;
	struct iscsi_dif_flags dif_flags;
	u8 reserved0[2];
	__le16 dif_task_icid;
	struct regpair sense_db;
	__le32 expected_itt;
	__le32 reserved1;

@@ -860,6 +927,10 @@ struct iscsi_reg1 {
#define ISCSI_REG1_RESERVED1_SHIFT 4
};

struct tqe_opaque {
	__le16 opaque[2];
};

/* The iscsi storm task context of Ustorm */
struct ustorm_iscsi_task_st_ctx {
	__le32 rem_rcv_len;

@@ -874,7 +945,7 @@ struct ustorm_iscsi_task_st_ctx {
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
	struct iscsi_dif_flags dif_flags;
	__le16 reserved3;
	__le32 reserved4;
	struct tqe_opaque tqe_opaque_list;
	__le32 reserved5;
	__le32 reserved6;
	__le32 reserved7;

@@ -946,6 +1017,18 @@ struct iscsi_conn_offload_params {
	__le32 stat_sn;
};

/* iSCSI connection statistics */
struct iscsi_conn_stats_params {
	struct regpair iscsi_tcp_tx_packets_cnt;
	struct regpair iscsi_tcp_tx_bytes_cnt;
	struct regpair iscsi_tcp_tx_rxmit_cnt;
	struct regpair iscsi_tcp_rx_packets_cnt;
	struct regpair iscsi_tcp_rx_bytes_cnt;
	struct regpair iscsi_tcp_rx_dup_ack_cnt;
	__le32 iscsi_tcp_rx_chksum_err_cnt;
	__le32 reserved;
};

/* spe message header */
struct iscsi_slow_path_hdr {
	u8 op_code;

@@ -978,14 +1061,17 @@ struct iscsi_conn_update_ramrod_params {
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
	u8 reserved0[3];
	__le32 max_seq_size;
	__le32 max_send_pdu_length;
	__le32 max_recv_pdu_length;
	__le32 first_seq_length;
	__le32 exp_stat_sn;
	union dif_configuration_params dif_on_imme_params;
};

/* iSCSI CQ element */

@@ -1007,7 +1093,7 @@ struct iscsi_cqe_solicited {
	u8 fw_dbg_field;
	u8 caused_conn_err;
	u8 reserved0[3];
	__le32 reserved1[1];
	__le32 data_truncated_bytes;
	union iscsi_task_hdr iscsi_hdr;
};

@@ -1019,7 +1105,8 @@ struct iscsi_cqe_unsolicited {
	__le16 reserved0;
	u8 reserved1;
	u8 unsol_cqe_type;
	struct regpair rqe_opaque;
	__le16 rqe_opaque;
	__le16 reserved2[3];
	union iscsi_task_hdr iscsi_hdr;
};

|
|||
/* iscsi debug modes */
|
||||
struct iscsi_debug_modes {
|
||||
u8 flags;
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1
|
||||
#define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7
|
||||
};
|
||||
|
||||
/* iSCSI kernel completion queue IDs */
|
||||
|
@ -1080,9 +1167,9 @@ enum iscsi_eqe_opcode {
|
|||
ISCSI_EVENT_TYPE_CLEAR_SQ,
|
||||
ISCSI_EVENT_TYPE_TERMINATE_CONN,
|
||||
ISCSI_EVENT_TYPE_MAC_UPDATE_CONN,
|
||||
ISCSI_EVENT_TYPE_COLLECT_STATS_CONN,
|
||||
ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE,
|
||||
ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE,
|
||||
RESERVED9,
|
||||
ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10,
|
||||
ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD,
|
||||
ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD,
|
||||
|
@ -1158,6 +1245,7 @@ enum iscsi_ramrod_cmd_id {
|
|||
ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5,
|
||||
ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6,
|
||||
ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7,
|
||||
ISCSI_RAMROD_CMD_ID_CONN_STATS = 8,
|
||||
MAX_ISCSI_RAMROD_CMD_ID
|
||||
};
|
||||
|
||||
|
@@ -1194,6 +1282,16 @@ struct iscsi_spe_conn_offload_option2 {
	struct tcp_offload_params_opt2 tcp;
};

/* iSCSI collect connection statistics request */
struct iscsi_spe_conn_statistics {
	struct iscsi_slow_path_hdr hdr;
	__le16 conn_id;
	__le32 fw_cid;
	u8 reset_stats;
	u8 reserved0[7];
	struct regpair stats_cnts_addr;
};

/* iSCSI connection termination request */
struct iscsi_spe_conn_termination {
	struct iscsi_slow_path_hdr hdr;

@@ -1220,12 +1318,14 @@ struct iscsi_spe_func_init {
	u8 num_r2tq_pages_in_ring;
	u8 num_uhq_pages_in_ring;
	u8 ll2_rx_queue_id;
	u8 ooo_enable;
	u8 flags;
#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1
#define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0
#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
	struct iscsi_debug_modes debug_mode;
	__le16 reserved1;
	__le32 reserved2;
	__le32 reserved3;
	__le32 reserved4;
	struct scsi_init_func_params func_params;
	struct scsi_init_func_queues q_params;
};

@@ -1242,6 +1342,7 @@ enum iscsi_task_type {
	ISCSI_TASK_TYPE_TARGET_READ,
	ISCSI_TASK_TYPE_TARGET_RESPONSE,
	ISCSI_TASK_TYPE_LOGIN_RESPONSE,
	ISCSI_TASK_TYPE_TARGET_IMM_W_DIF,
	MAX_ISCSI_TASK_TYPE
};

@@ -1326,6 +1427,7 @@ struct iscsi_xhqe {
/* Per PF iSCSI receive path statistics - mStorm RAM structure */
struct mstorm_iscsi_stats_drv {
	struct regpair iscsi_rx_dropped_pdus_task_not_valid;
	struct regpair iscsi_rx_dup_ack_cnt;
};

/* Per PF iSCSI transmit path statistics - pStorm RAM structure */

@@ -1339,6 +1441,9 @@ struct tstorm_iscsi_stats_drv {
	struct regpair iscsi_rx_bytes_cnt;
	struct regpair iscsi_rx_packet_cnt;
	struct regpair iscsi_rx_new_ooo_isle_events_cnt;
	struct regpair iscsi_rx_tcp_payload_bytes_cnt;
	struct regpair iscsi_rx_tcp_pkt_cnt;
	struct regpair iscsi_rx_pure_ack_cnt;
	__le32 iscsi_cmdq_threshold_cnt;
	__le32 iscsi_rq_threshold_cnt;
	__le32 iscsi_immq_threshold_cnt;

@@ -1355,6 +1460,8 @@ struct ustorm_iscsi_stats_drv {
struct xstorm_iscsi_stats_drv {
	struct regpair iscsi_tx_go_to_slow_start_event_cnt;
	struct regpair iscsi_tx_fast_retransmit_event_cnt;
	struct regpair iscsi_tx_pure_ack_cnt;
	struct regpair iscsi_tx_delayed_ack_cnt;
};

/* Per PF iSCSI transmit path statistics - yStorm RAM structure */

@@ -1362,6 +1469,8 @@ struct ystorm_iscsi_stats_drv {
	struct regpair iscsi_tx_data_pdu_cnt;
	struct regpair iscsi_tx_r2t_pdu_cnt;
	struct regpair iscsi_tx_total_pdu_cnt;
	struct regpair iscsi_tx_tcp_payload_bytes_cnt;
	struct regpair iscsi_tx_tcp_pkt_cnt;
};

struct e4_tstorm_iscsi_task_ag_ctx {

@@ -61,6 +61,35 @@ struct qed_txq_start_ret_params {
	void *p_handle;
};

enum qed_filter_config_mode {
	QED_FILTER_CONFIG_MODE_DISABLE,
	QED_FILTER_CONFIG_MODE_5_TUPLE,
	QED_FILTER_CONFIG_MODE_L4_PORT,
	QED_FILTER_CONFIG_MODE_IP_DEST,
};

struct qed_ntuple_filter_params {
	/* Physically mapped address containing header of buffer to be used
	 * as filter.
	 */
	dma_addr_t addr;

	/* Length of header in bytes */
	u16 length;

	/* Relative queue-id to receive classified packet */
#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)
	u16 qid;

	/* Identifier can either be according to vport-id or vfid */
	bool b_is_vf;
	u8 vport_id;
	u8 vf_id;

	/* true iff this filter is to be added. Else to be removed */
	bool b_is_add;
};

struct qed_dev_eth_info {
	struct qed_dev_info common;

@@ -316,13 +345,12 @@ struct qed_eth_ops {
	int (*tunn_config)(struct qed_dev *cdev,
			   struct qed_tunn_params *params);

	int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
				    dma_addr_t mapping, u16 length,
				    u16 vport_id, u16 rx_queue_id,
				    bool add_filter);
	int (*ntuple_filter_config)(struct qed_dev *cdev,
				    void *cookie,
				    struct qed_ntuple_filter_params *params);

	int (*configure_arfs_searcher)(struct qed_dev *cdev,
				       bool en_searcher);
				       enum qed_filter_config_mode mode);
	int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle);
};

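The ntuple_filter_config op above now takes a single parameter block instead of a long argument list. The following is a hypothetical, self-contained sketch (user-space stand-in types, not kernel or driver code) of how a caller might fill struct qed_ntuple_filter_params; the field names come from the hunk above, while the typedefs and demo_ntuple_config() are invented purely for illustration.

/* Stand-alone illustration only; dma_addr_t/u16/u8 are user-space stand-ins
 * and demo_ntuple_config() mimics the op's new three-argument shape.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* stand-in */
typedef uint16_t u16;
typedef uint8_t u8;

#define QED_RFS_NTUPLE_QID_RSS ((u16)-1)

struct qed_ntuple_filter_params {
	dma_addr_t addr;	/* mapped filter header buffer */
	u16 length;		/* header length in bytes */
	u16 qid;		/* relative rx queue, or QED_RFS_NTUPLE_QID_RSS */
	bool b_is_vf;
	u8 vport_id;
	u8 vf_id;
	bool b_is_add;		/* true = add filter, false = remove */
};

static int demo_ntuple_config(void *cdev, void *cookie,
			      struct qed_ntuple_filter_params *p)
{
	printf("%s filter: len=%u qid=%u vport=%u\n",
	       p->b_is_add ? "add" : "del", p->length, p->qid, p->vport_id);
	return 0;
}

int main(void)
{
	struct qed_ntuple_filter_params params = {
		.addr = 0x1000,			/* pretend DMA address */
		.length = 64,
		.qid = QED_RFS_NTUPLE_QID_RSS,	/* steer by RSS, not a fixed queue */
		.vport_id = 0,
		.b_is_add = true,
	};

	return demo_ntuple_config(NULL, NULL, &params);
}

The design point the sketch illustrates is that new classification options (such as the qed_filter_config_mode values above) can be added later without changing the op's signature again.
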
@@ -244,16 +244,11 @@ struct qed_fcoe_pf_params {
/* Most of the parameters below are described in the FW iSCSI / TCP HSI */
struct qed_iscsi_pf_params {
	u64 glbl_q_params_addr;
	u64 bdq_pbl_base_addr[2];
	u32 max_cwnd;
	u64 bdq_pbl_base_addr[3];
	u16 cq_num_entries;
	u16 cmdq_num_entries;
	u32 two_msl_timer;
	u16 dup_ack_threshold;
	u16 tx_sws_timer;
	u16 min_rto;
	u16 min_rto_rt;
	u16 max_rto;

	/* The following parameters are used during HW-init
	 * and these parameters need to be passed as arguments

@@ -264,8 +259,8 @@ struct qed_iscsi_pf_params {

	/* The following parameters are used during protocol-init */
	u16 half_way_close_timeout;
	u16 bdq_xoff_threshold[2];
	u16 bdq_xon_threshold[2];
	u16 bdq_xoff_threshold[3];
	u16 bdq_xon_threshold[3];
	u16 cmdq_xoff_threshold;
	u16 cmdq_xon_threshold;
	u16 rq_buffer_size;

@@ -281,10 +276,11 @@ struct qed_iscsi_pf_params {
	u8 gl_cmd_pi;
	u8 debug_mode;
	u8 ll2_ooo_queue_id;
	u8 ooo_enable;

	u8 is_target;
	u8 bdq_pbl_num_entries[2];
	u8 is_soc_en;
	u8 soc_num_of_blocks_log;
	u8 bdq_pbl_num_entries[3];
};

struct qed_rdma_pf_params {

@@ -102,7 +102,6 @@ struct qed_iscsi_params_offload {
	u32 ss_thresh;
	u16 srtt;
	u16 rtt_var;
	u32 ts_time;
	u32 ts_recent;
	u32 ts_recent_age;
	u32 total_rt;

@@ -124,7 +123,6 @@ struct qed_iscsi_params_offload {
	u16 mss;
	u8 snd_wnd_scale;
	u8 rcv_wnd_scale;
	u32 ts_ticks_per_second;
	u16 da_timeout_value;
	u8 ack_frequency;
};

@@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data {
	u32 opaque_data_1;

	/* GSI only */
	u32 gid_dst[4];
	u32 src_qp;
	u16 qp_id;

	union {

@@ -37,21 +37,45 @@
/* SCSI CONSTANTS */
/*********************/

#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
#define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2)
#define BDQ_NUM_RESOURCES (4)

#define BDQ_ID_RQ (0)
#define BDQ_ID_IMM_DATA (1)
#define BDQ_NUM_IDS (2)
#define BDQ_ID_TQ (2)
#define BDQ_NUM_IDS (3)

#define SCSI_NUM_SGES_SLOW_SGL_THR 8

#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)

/* SCSI op codes */
#define SCSI_OPCODE_COMPARE_AND_WRITE (0x89)
#define SCSI_OPCODE_READ_10 (0x28)
#define SCSI_OPCODE_WRITE_6 (0x0A)
#define SCSI_OPCODE_WRITE_10 (0x2A)
#define SCSI_OPCODE_WRITE_12 (0xAA)
#define SCSI_OPCODE_WRITE_16 (0x8A)
#define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E)
#define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE)
#define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E)

/* iSCSI Drv opaque */
struct iscsi_drv_opaque {
	__le16 reserved_zero[3];
	__le16 opaque;
};

/* Scsi 2B/8B opaque union */
union scsi_opaque {
	struct regpair fcoe_opaque;
	struct iscsi_drv_opaque iscsi_opaque;
};

/* SCSI buffer descriptor */
struct scsi_bd {
	struct regpair address;
	struct regpair opaque;
	union scsi_opaque opaque;
};

/* Scsi Drv BDQ struct */

@@ -101,21 +125,24 @@ struct scsi_init_func_queues {
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3
#define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1
#define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4
#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7
#define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5
	__le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS];
	u8 num_queues;
	u8 queue_relative_offset;
	u8 cq_sb_pi;
	u8 cmdq_sb_pi;
	__le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS];
	__le16 reserved0;
	u8 bdq_pbl_num_entries[BDQ_NUM_IDS];
	u8 reserved1;
	struct regpair bdq_pbl_base_address[BDQ_NUM_IDS];
	__le16 bdq_xoff_threshold[BDQ_NUM_IDS];
	__le16 bdq_xon_threshold[BDQ_NUM_IDS];
	__le16 cmdq_xoff_threshold;
	__le16 bdq_xon_threshold[BDQ_NUM_IDS];
	__le16 cmdq_xon_threshold;
	__le32 reserved1;
};

/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */

@@ -147,4 +174,9 @@ struct scsi_terminate_extra_params {
	u8 reserved[4];
};

/* SCSI Task Queue Element */
struct scsi_tqe {
	__le16 itid;
};

#endif /* __STORAGE_COMMON__ */

@@ -79,24 +79,29 @@ struct tcp_offload_params {
	__le16 remote_mac_addr_mid;
	__le16 remote_mac_addr_hi;
	__le16 vlan_id;
	u8 flags;
	__le16 flags;
#define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0
#define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1
#define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2
#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3
#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6
#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1
#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5
#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6
#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1
#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8
#define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F
#define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9
	u8 ip_version;
	u8 reserved0[3];
	__le32 remote_ip[4];
	__le32 local_ip[4];
	__le32 flow_label;

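The flags field above widens from u8 to __le16 and its bit layout is repacked, which is why the *_SHIFT values move. A generic sketch follows, using hypothetical FIELD_SET()/FIELD_GET() helpers rather than the driver's own macros, to show how such a *_MASK/*_SHIFT pair is packed and read back; the TCP_OFFLOAD_PARAMS_NAGLE_EN values are copied from the hunk above.

/* Illustration only: generic helpers for *_MASK/*_SHIFT #define pairs, not
 * the driver's macros. The flags variable here is a plain host-order copy;
 * the real field is __le16 and would additionally need endian conversion.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_SET(var, name, val)					\
	((var) = ((var) & ~((name##_MASK) << (name##_SHIFT))) |	\
		 (((val) & (name##_MASK)) << (name##_SHIFT)))
#define FIELD_GET(var, name)						\
	(((var) >> (name##_SHIFT)) & (name##_MASK))

/* Values copied from the hunk above */
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK	0x1
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT	5

int main(void)
{
	uint16_t flags = 0;

	FIELD_SET(flags, TCP_OFFLOAD_PARAMS_NAGLE_EN, 1);
	printf("flags=0x%04x nagle_en=%u\n", flags,
	       (unsigned int)FIELD_GET(flags, TCP_OFFLOAD_PARAMS_NAGLE_EN));
	return 0;
}
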
@@ -108,17 +113,21 @@ struct tcp_offload_params {
	u8 rcv_wnd_scale;
	u8 connect_mode;
	__le16 srtt;
	__le32 cwnd;
	__le32 ss_thresh;
	__le16 reserved1;
	__le32 rcv_wnd;
	__le32 cwnd;
	u8 ka_max_probe_cnt;
	u8 dup_ack_theshold;
	__le16 reserved1;
	__le32 ka_timeout;
	__le32 ka_interval;
	__le32 max_rt_time;
	__le32 initial_rcv_wnd;
	__le32 rcv_next;
	__le32 snd_una;
	__le32 snd_next;
	__le32 snd_max;
	__le32 snd_wnd;
	__le32 rcv_wnd;
	__le32 snd_wl1;
	__le32 ts_recent;
	__le32 ts_recent_age;

@@ -131,14 +140,10 @@ struct tcp_offload_params {
	u8 rt_cnt;
	__le16 rtt_var;
	__le16 fw_internal;
	__le32 ka_timeout;
	__le32 ka_interval;
	__le32 max_rt_time;
	__le32 initial_rcv_wnd;
	u8 snd_wnd_scale;
	u8 ack_frequency;
	__le16 da_timeout_value;
	__le32 reserved3[2];
	__le32 reserved3;
};

/* tcp offload parameters */

@@ -150,16 +155,19 @@ struct tcp_offload_params_opt2 {
	__le16 remote_mac_addr_mid;
	__le16 remote_mac_addr_hi;
	__le16 vlan_id;
	u8 flags;
	__le16 flags;
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3
#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1
#define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4
	u8 ip_version;
	u8 reserved1[3];
	__le32 remote_ip[4];
	__le32 local_ip[4];
	__le32 flow_label;

@@ -173,7 +181,13 @@ struct tcp_offload_params_opt2 {
	__le16 syn_ip_payload_length;
	__le32 syn_phy_addr_lo;
	__le32 syn_phy_addr_hi;
	__le32 reserved1[22];
	__le32 cwnd;
	u8 ka_max_probe_cnt;
	u8 reserved2[3];
	__le32 ka_timeout;
	__le32 ka_interval;
	__le32 max_rt_time;
	__le32 reserved3[16];
};

/* tcp IPv4/IPv6 enum */