Merge branch 'cxgb4'
Hariprasad Shenai says:

====================
Add support for CIQs and other misc. fixes for rdma/cxgb4

This patch series adds support for allocating and using IQs specifically
for indirect interrupts, aligns the ISS for iWARP connections, and fixes
the TCP snd/rcv window handling for Chelsio T4/T5 adapters in iw_cxgb4.
It also changes the Interrupt Holdoff Packet Count threshold of response
queues in the cxgb4 driver.

The patch series is created against the 'net-next' tree and includes
patches for both the cxgb4 and iw_cxgb4 drivers. Since it contains cxgb4
and iw_cxgb4 patches, we would like to request that it be merged via
David Miller's 'net-next' tree. We have included all the maintainers of
the respective drivers. Kindly review the changes and let us know of any
review comments.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit c4d4c255d8
@@ -232,12 +232,16 @@ static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)

 static void set_emss(struct c4iw_ep *ep, u16 opt)
 {
-	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
+	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] -
+		   sizeof(struct iphdr) - sizeof(struct tcphdr);
 	ep->mss = ep->emss;
 	if (GET_TCPOPT_TSTAMP(opt))
 		ep->emss -= 12;
 	if (ep->emss < 128)
 		ep->emss = 128;
+	if (ep->emss & 7)
+		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
+		     GET_TCPOPT_MSS(opt), ep->mss, ep->emss);
 	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
 	     ep->mss, ep->emss);
 }
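For reference, the arithmetic set_emss() now performs can be checked in isolation. A minimal user-space sketch, not part of this series; it assumes IPv4 without IP options, so the header sizes are hard-coded to 20 + 20 bytes:

#include <stdio.h>

/* Stand-alone model of the new set_emss() arithmetic: effective MSS is
 * the table MTU minus the real IPv4 + TCP header sizes, minus 12 bytes
 * when TCP timestamps are in use, floored at 128. */
static unsigned int model_emss(unsigned int mtu, int tstamps_enabled)
{
        unsigned int emss = mtu - 20 - 20;

        if (tstamps_enabled)
                emss -= 12;
        if (emss < 128)
                emss = 128;
        return emss;
}

int main(void)
{
        /* 1500-byte MTU: 1460 without timestamps, 1448 with them; only
         * the latter is a multiple of 8, which is why the series also
         * nudges the MTU table toward 1488 (see the cxgb4 hunks below). */
        printf("no-ts %u, ts %u\n", model_emss(1500, 0), model_emss(1500, 1));
        return 0;
}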
@@ -468,7 +472,7 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
 	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
 	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
 	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
-	flowc->mnemval[6].val = cpu_to_be32(snd_win);
+	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
 	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
 	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
 	/* Pad WR to 16 byte boundary */
@@ -528,6 +532,17 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }

+static void best_mtu(const unsigned short *mtus, unsigned short mtu,
+		     unsigned int *idx, int use_ts)
+{
+	unsigned short hdr_size = sizeof(struct iphdr) +
+				  sizeof(struct tcphdr) +
+				  (use_ts ? 12 : 0);
+	unsigned short data_size = mtu - hdr_size;
+
+	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
+}
+
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req;
@@ -550,6 +565,7 @@ static int send_connect(struct c4iw_ep *ep)
 	struct sockaddr_in *ra = (struct sockaddr_in *)&ep->com.remote_addr;
 	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
 	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
+	int win;

 	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
 		roundup(sizev4, 16) :
@@ -565,8 +581,18 @@ static int send_connect(struct c4iw_ep *ep)
 	}
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

-	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+		 enable_tcp_timestamps);
 	wscale = compute_wscale(rcv_win);
+
+	/*
+	 * Specify the largest window that will fit in opt0. The
+	 * remainder will be specified in the rx_data_ack.
+	 */
+	win = ep->rcv_win >> 10;
+	if (win > RCV_BUFSIZ_MASK)
+		win = RCV_BUFSIZ_MASK;
+
 	opt0 = (nocong ? NO_CONG(1) : 0) |
 	       KEEP_ALIVE(1) |
 	       DELACK(1) |
@@ -577,7 +603,7 @@ static int send_connect(struct c4iw_ep *ep)
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos) |
 	       ULP_MODE(ULP_MODE_TCPDDP) |
-	       RCV_BUFSIZ(rcv_win>>10);
+	       RCV_BUFSIZ(win);
 	opt2 = RX_CHANNEL(0) |
 	       CCTRL_ECN(enable_ecn) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
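The new win computation only advertises what fits in the 10-bit RCV_BUFSIZ field of opt0, in 1KB units. A minimal sketch of the clamp, with RCV_BUFSIZ_MASK copied from the t4_msg.h hunk at the end of this diff:

#include <stdio.h>

#define RCV_BUFSIZ_MASK 0x3FFU  /* 10 bits, from t4_msg.h below */

/* opt0 carries the receive window in 1KB units, capped at the field
 * width; anything beyond is handed back later as rx_data_ack credits. */
static unsigned int opt0_window(unsigned int rcv_win_bytes)
{
        unsigned int win = rcv_win_bytes >> 10;

        if (win > RCV_BUFSIZ_MASK)
                win = RCV_BUFSIZ_MASK;
        return win;
}

int main(void)
{
        /* 256KB fits (256); a 2MB window is clamped to 1023 * 1KB. */
        printf("%u %u\n", opt0_window(256 << 10), opt0_window(2 << 20));
        return 0;
}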
@@ -633,6 +659,13 @@ static int send_connect(struct c4iw_ep *ep)
 			req6->opt2 = cpu_to_be32(opt2);
 		}
 	} else {
+		u32 isn = (prandom_u32() & ~7UL) - 1;
+
+		opt2 |= T5_OPT_2_VALID;
+		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		if (peer2peer)
+			isn += 4;
+
 		if (ep->com.remote_addr.ss_family == AF_INET) {
 			t5_req = (struct cpl_t5_act_open_req *)
 				 skb_put(skb, wrlen);
@@ -649,6 +682,9 @@ static int send_connect(struct c4iw_ep *ep)
 					cxgb4_select_ntuple(
 					ep->com.dev->rdev.lldi.ports[0],
 					ep->l2t)));
+			t5_req->rsvd = cpu_to_be32(isn);
+			PDBG("%s snd_isn %u\n", __func__,
+			     be32_to_cpu(t5_req->rsvd));
 			t5_req->opt2 = cpu_to_be32(opt2);
 		} else {
 			t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -672,6 +708,9 @@ static int send_connect(struct c4iw_ep *ep)
 					cxgb4_select_ntuple(
 					ep->com.dev->rdev.lldi.ports[0],
 					ep->l2t));
+			t5_req6->rsvd = cpu_to_be32(isn);
+			PDBG("%s snd_isn %u\n", __func__,
+			     be32_to_cpu(t5_req6->rsvd));
 			t5_req6->opt2 = cpu_to_be32(opt2);
 		}
 	}
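The ISS chosen for T5 is worth a second look: (prandom_u32() & ~7UL) - 1 yields a value one less than a multiple of 8, so the first byte after the ISS is 8-byte aligned, and the optional += 4 re-biases that for peer2peer mode. A small user-space check of the alignment property, with rand() standing in for prandom_u32():

#include <stdio.h>
#include <stdlib.h>

/* Model of the T5 initial send sequence choice: clear the low three
 * bits of a random value and subtract one, so isn + 1 (the first data
 * byte) lands on an 8-byte boundary; peer2peer shifts it by 4. */
static unsigned int pick_isn(unsigned int rnd, int peer2peer)
{
        unsigned int isn = (rnd & ~7U) - 1;

        if (peer2peer)
                isn += 4;
        return isn;
}

int main(void)
{
        unsigned int isn = pick_isn((unsigned int)rand(), 0);

        /* (isn + 1) % 8 is always 0 in the non-peer2peer case. */
        printf("isn %u, (isn + 1) %% 8 = %u\n", isn, (isn + 1) % 8);
        return 0;
}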
@@ -1145,6 +1184,14 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
 		return 0;
 	}

+	/*
+	 * If we couldn't specify the entire rcv window at connection setup
+	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
+	 * then add the overage in to the credits returned.
+	 */
+	if (ep->rcv_win > RCV_BUFSIZ_MASK * 1024)
+		credits += ep->rcv_win - RCV_BUFSIZ_MASK * 1024;
+
 	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
 	memset(req, 0, wrlen);
 	INIT_TP_WR(req, ep->hwtid);
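Together with the opt0 clamp shown earlier, this is the second half of the window plumbing: whatever could not be advertised in RCV_BUFSIZ is restored as extra RX credits. A sketch of the accounting, sizes in bytes and the mask as in t4_msg.h:

#include <stdio.h>

#define RCV_BUFSIZ_MASK 0x3FFU

/* A credit update returns the window overage that opt0 could not
 * express, so the peer eventually sees the full receive window. */
static unsigned int credit_update(unsigned int credits,
                                  unsigned int rcv_win_bytes)
{
        if (rcv_win_bytes > RCV_BUFSIZ_MASK * 1024)
                credits += rcv_win_bytes - RCV_BUFSIZ_MASK * 1024;
        return credits;
}

int main(void)
{
        /* A 2MB window: 1023KB went into opt0, the rest rides here. */
        printf("%u\n", credit_update(0, 2 << 20));
        return 0;
}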
@@ -1618,6 +1665,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 	unsigned int mtu_idx;
 	int wscale;
 	struct sockaddr_in *sin;
+	int win;

 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
 	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
@@ -1640,8 +1688,18 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 			htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
 	req->tcb.tx_max = (__force __be32) jiffies;
 	req->tcb.rcv_adv = htons(1);
-	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+		 enable_tcp_timestamps);
 	wscale = compute_wscale(rcv_win);
+
+	/*
+	 * Specify the largest window that will fit in opt0. The
+	 * remainder will be specified in the rx_data_ack.
+	 */
+	win = ep->rcv_win >> 10;
+	if (win > RCV_BUFSIZ_MASK)
+		win = RCV_BUFSIZ_MASK;
+
 	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
 		(nocong ? NO_CONG(1) : 0) |
 		KEEP_ALIVE(1) |
@@ -1653,7 +1711,7 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
 		SMAC_SEL(ep->smac_idx) |
 		DSCP(ep->tos) |
 		ULP_MODE(ULP_MODE_TCPDDP) |
-		RCV_BUFSIZ(rcv_win >> 10));
+		RCV_BUFSIZ(win));
 	req->tcb.opt2 = (__force __be32) (PACE(1) |
 		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
 		RX_CHANNEL(0) |
@@ -1690,6 +1748,13 @@ static int is_neg_adv(unsigned int status)
 	       status == CPL_ERR_KEEPALV_NEG_ADVICE;
 }

+static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
+{
+	ep->snd_win = snd_win;
+	ep->rcv_win = rcv_win;
+	PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win);
+}
+
 #define ACT_OPEN_RETRY_COUNT 2

 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
@@ -1738,6 +1803,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 		ep->ctrlq_idx = cxgb4_port_idx(pdev);
 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
 			cxgb4_port_idx(pdev) * step];
+		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
 		dev_put(pdev);
 	} else {
 		pdev = get_real_dev(n->dev);
@@ -1756,6 +1822,7 @@ static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
 			cdev->rdev.lldi.nchan;
 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
 			cxgb4_port_idx(n->dev) * step];
+		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));

 		if (clear_mpa_v1) {
 			ep->retry_with_mpa_v1 = 0;
@@ -1986,13 +2053,36 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	u64 opt0;
 	u32 opt2;
 	int wscale;
+	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
+	int win;

 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	BUG_ON(skb_cloned(skb));
-	skb_trim(skb, sizeof(*rpl));
+
 	skb_get(skb);
-	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+	rpl = cplhdr(skb);
+	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		skb_trim(skb, roundup(sizeof(*rpl5), 16));
+		rpl5 = (void *)rpl;
+		INIT_TP_WR(rpl5, ep->hwtid);
+	} else {
+		skb_trim(skb, sizeof(*rpl));
+		INIT_TP_WR(rpl, ep->hwtid);
+	}
+	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
+						    ep->hwtid));
+
+	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
+		 enable_tcp_timestamps && req->tcpopt.tstamp);
 	wscale = compute_wscale(rcv_win);
+
+	/*
+	 * Specify the largest window that will fit in opt0. The
+	 * remainder will be specified in the rx_data_ack.
+	 */
+	win = ep->rcv_win >> 10;
+	if (win > RCV_BUFSIZ_MASK)
+		win = RCV_BUFSIZ_MASK;
 	opt0 = (nocong ? NO_CONG(1) : 0) |
 	       KEEP_ALIVE(1) |
 	       DELACK(1) |
@@ -2003,7 +2093,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 	       SMAC_SEL(ep->smac_idx) |
 	       DSCP(ep->tos >> 2) |
 	       ULP_MODE(ULP_MODE_TCPDDP) |
-	       RCV_BUFSIZ(rcv_win>>10);
+	       RCV_BUFSIZ(win);
 	opt2 = RX_CHANNEL(0) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);

@@ -2023,14 +2113,18 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
 		opt2 |= CCTRL_ECN(1);
 	}
 	if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
+		u32 isn = (prandom_u32() & ~7UL) - 1;
 		opt2 |= T5_OPT_2_VALID;
 		opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
+		opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+		rpl5 = (void *)rpl;
+		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
+		if (peer2peer)
+			isn += 4;
+		rpl5->iss = cpu_to_be32(isn);
+		PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
 	}

-	rpl = cplhdr(skb);
-	INIT_TP_WR(rpl, ep->hwtid);
-	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
-						    ep->hwtid));
 	rpl->opt0 = cpu_to_be64(opt0);
 	rpl->opt2 = cpu_to_be32(opt2);
 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
@@ -2095,6 +2189,7 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	int err;
 	u16 peer_mss = ntohs(req->tcpopt.mss);
 	int iptype;
+	unsigned short hdrs;

 	parent_ep = lookup_stid(t, stid);
 	if (!parent_ep) {
@@ -2152,8 +2247,10 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}

-	if (peer_mss && child_ep->mtu > (peer_mss + 40))
-		child_ep->mtu = peer_mss + 40;
+	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
+	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
+		child_ep->mtu = peer_mss + hdrs;

 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
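The child MTU clamp now accounts for the actual option overhead instead of a flat 40 bytes. A quick model of the new bound, again hard-coding IPv4 header sizes for illustration:

#include <stdio.h>

/* MTU offered to the child endpoint: the peer's advertised MSS plus
 * the real header overhead (IPv4 + TCP, plus 12 if timestamps were
 * seen in the SYN), replacing the old hard-coded "+ 40". */
static unsigned int child_mtu(unsigned int our_mtu, unsigned int peer_mss,
                              int tstamps)
{
        unsigned int hdrs = 20 + 20 + (tstamps ? 12 : 0);

        if (peer_mss && our_mtu > peer_mss + hdrs)
                our_mtu = peer_mss + hdrs;
        return our_mtu;
}

int main(void)
{
        /* Peer advertises 1448 with timestamps: 1448 + 52 = 1500. */
        printf("%u\n", child_mtu(9000, 1448, 1));
        return 0;
}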
@@ -134,7 +134,8 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 			V_FW_RI_RES_WR_IQANUS(0) |
 			V_FW_RI_RES_WR_IQANUD(1) |
 			F_FW_RI_RES_WR_IQANDST |
-			V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));
+			V_FW_RI_RES_WR_IQANDSTINDEX(
+				rdev->lldi.ciq_ids[cq->vector]));
 	res->u.cq.iqdroprss_to_iqesize = cpu_to_be16(
 			F_FW_RI_RES_WR_IQDROPRSS |
 			V_FW_RI_RES_WR_IQPCIECH(2) |
@@ -870,6 +871,9 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,

 	rhp = to_c4iw_dev(ibdev);

+	if (vector >= rhp->rdev.lldi.nciq)
+		return ERR_PTR(-EINVAL);
+
 	chp = kzalloc(sizeof(*chp), GFP_KERNEL);
 	if (!chp)
 		return ERR_PTR(-ENOMEM);
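Since provider.c (below) now reports one completion vector per CIQ, a user-space consumer can place CQs on distinct vectors. A minimal libibverbs sketch of how the new vector bound is exercised; illustrative only, with error handling mostly elided:

#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
        int num;
        struct ibv_device **list = ibv_get_device_list(&num);

        if (!list || !num)
                return 1;

        struct ibv_context *ctx = ibv_open_device(list[0]);

        if (!ctx)
                return 1;

        /* num_comp_vectors now equals the number of RDMA CIQs; any
         * comp_vector below it is valid, anything at or above it makes
         * c4iw_create_cq() fail with -EINVAL. */
        for (int v = 0; v < ctx->num_comp_vectors; v++) {
                struct ibv_cq *cq = ibv_create_cq(ctx, 128, NULL, NULL, v);

                printf("vector %d: %s\n", v, cq ? "ok" : "failed");
                if (cq)
                        ibv_destroy_cq(cq);
        }

        ibv_close_device(ctx);
        ibv_free_device_list(list);
        return 0;
}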
@@ -915,6 +919,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	}
 	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
+	chp->cq.vector = vector;

 	ret = create_cq(&rhp->rdev, &chp->cq,
 			ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
@@ -805,6 +805,8 @@ struct c4iw_ep {
 	u8 retry_with_mpa_v1;
 	u8 tried_with_mpa_v1;
 	unsigned int retry_count;
+	int snd_win;
+	int rcv_win;
 };

 static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id)
@@ -499,7 +499,7 @@ int c4iw_register_device(struct c4iw_dev *dev)
 	dev->ibdev.node_type = RDMA_NODE_RNIC;
 	memcpy(dev->ibdev.node_desc, C4IW_NODE_DESC, sizeof(C4IW_NODE_DESC));
 	dev->ibdev.phys_port_cnt = dev->rdev.lldi.nports;
-	dev->ibdev.num_comp_vectors = 1;
+	dev->ibdev.num_comp_vectors = dev->rdev.lldi.nciq;
 	dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev);
 	dev->ibdev.query_device = c4iw_query_device;
 	dev->ibdev.query_port = c4iw_query_port;
@@ -542,6 +542,7 @@ struct t4_cq {
 	size_t memsize;
 	__be64 bits_type_ts;
 	u32 cqid;
+	int vector;
 	u16 size; /* including status page */
 	u16 cidx;
 	u16 sw_pidx;
@@ -848,6 +848,7 @@ enum {                     /* TCP congestion control algorithms */
 #define V_CONG_CNTRL(x) ((x) << S_CONG_CNTRL)
 #define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)

+#define CONG_CNTRL_VALID   (1 << 18)
 #define T5_OPT_2_VALID       (1 << 31)

 #endif /* _T4FW_RI_API_H_ */
@@ -357,11 +357,17 @@ enum {
 	MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
 	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
 	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
+	MAX_RDMA_CIQS = NCHAN,        /* # of RDMA concentrator IQs */
+	MAX_ISCSI_QUEUES = NCHAN,     /* # of streaming iSCSI Rx queues */
 };

 enum {
-	MAX_EGRQ = 128,         /* max # of egress queues, including FLs */
-	MAX_INGQ = 64           /* max # of interrupt-capable ingress queues */
+	INGQ_EXTRAS = 2,        /* firmware event queue and */
+				/*   forwarded interrupts */
+	MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
+		   + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
+	MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+		   + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
 };

 struct adapter;
@@ -538,6 +544,7 @@ struct sge {
 	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
 	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
 	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
+	struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
 	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;

 	struct sge_rspq intrq ____cacheline_aligned_in_smp;
@@ -548,8 +555,10 @@ struct sge {
 	u16 ethtxq_rover;           /* Tx queue to clean up next */
 	u16 ofldqsets;              /* # of active offload queue sets */
 	u16 rdmaqs;                 /* # of available RDMA Rx queues */
+	u16 rdmaciqs;               /* # of available RDMA concentrator IQs */
 	u16 ofld_rxq[MAX_OFLD_QSETS];
 	u16 rdma_rxq[NCHAN];
+	u16 rdma_ciq[NCHAN];
 	u16 timer_val[SGE_NTIMERS];
 	u8 counter_val[SGE_NCOUNTERS];
 	u32 fl_pg_order;            /* large page allocation size */
@@ -577,6 +586,7 @@ struct sge {
 #define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
 #define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
 #define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
+#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)

 struct l2t_data;

@@ -818,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap)
 		for_each_rdmarxq(&adap->sge, i)
 			snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
 				 adap->port[0]->name, i);
+
+		for_each_rdmaciq(&adap->sge, i)
+			snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
+				 adap->port[0]->name, i);
 	}
 }

 static int request_msix_queue_irqs(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
-	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
+	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+	int msi_index = 2;

 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
 			  adap->msix_info[1].desc, &s->fw_evtq);
@@ -857,9 +862,21 @@ static int request_msix_queue_irqs(struct adapter *adap)
 			goto unwind;
 		msi_index++;
 	}
+	for_each_rdmaciq(s, rdmaciqqidx) {
+		err = request_irq(adap->msix_info[msi_index].vec,
+				  t4_sge_intr_msix, 0,
+				  adap->msix_info[msi_index].desc,
+				  &s->rdmaciq[rdmaciqqidx].rspq);
+		if (err)
+			goto unwind;
+		msi_index++;
+	}
 	return 0;

 unwind:
+	while (--rdmaciqqidx >= 0)
+		free_irq(adap->msix_info[--msi_index].vec,
+			 &s->rdmaciq[rdmaciqqidx].rspq);
 	while (--rdmaqidx >= 0)
 		free_irq(adap->msix_info[--msi_index].vec,
 			 &s->rdmarxq[rdmaqidx].rspq);
@@ -885,6 +902,8 @@ static void free_msix_queue_irqs(struct adapter *adap)
 		free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
 	for_each_rdmarxq(s, i)
 		free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
+	for_each_rdmaciq(s, i)
+		free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
 }

 /**
@@ -1047,7 +1066,8 @@ freeout:	t4_free_sge_resources(adap);
 		if (msi_idx > 0)
 			msi_idx++;
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
-				       &q->fl, uldrx_handler);
+				       q->fl.size ? &q->fl : NULL,
+				       uldrx_handler);
 		if (err)
 			goto freeout;
 		memset(&q->stats, 0, sizeof(q->stats));
@@ -1064,13 +1084,28 @@ freeout:	t4_free_sge_resources(adap);
 		if (msi_idx > 0)
 			msi_idx++;
 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
-				       msi_idx, &q->fl, uldrx_handler);
+				       msi_idx, q->fl.size ? &q->fl : NULL,
+				       uldrx_handler);
 		if (err)
 			goto freeout;
 		memset(&q->stats, 0, sizeof(q->stats));
 		s->rdma_rxq[i] = q->rspq.abs_id;
 	}

+	for_each_rdmaciq(s, i) {
+		struct sge_ofld_rxq *q = &s->rdmaciq[i];
+
+		if (msi_idx > 0)
+			msi_idx++;
+		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+				       msi_idx, q->fl.size ? &q->fl : NULL,
+				       uldrx_handler);
+		if (err)
+			goto freeout;
+		memset(&q->stats, 0, sizeof(q->stats));
+		s->rdma_ciq[i] = q->rspq.abs_id;
+	}
+
 	for_each_port(adap, i) {
 		/*
 		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
@@ -2468,8 +2503,7 @@ static unsigned int qtimer_val(const struct adapter *adap,
 }

 /**
- *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
- *	@adap: the adapter
+ *	set_rspq_intr_params - set a queue's interrupt holdoff parameters
 *	@q: the Rx queue
 *	@us: the hold-off time in us, or 0 to disable timer
 *	@cnt: the hold-off packet count, or 0 to disable counter
@@ -2477,9 +2511,11 @@ static unsigned int qtimer_val(const struct adapter *adap,
 *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
 *	one of the two needs to be enabled for the queue to generate interrupts.
 */
-static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
-			       unsigned int us, unsigned int cnt)
+static int set_rspq_intr_params(struct sge_rspq *q,
+				unsigned int us, unsigned int cnt)
 {
+	struct adapter *adap = q->adap;
+
 	if ((us | cnt) == 0)
 		cnt = 1;

@@ -2506,24 +2542,34 @@ static int set_rspq_intr_params(struct sge_rspq *q,
 	return 0;
 }

-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
-{
-	const struct port_info *pi = netdev_priv(dev);
+/**
+ * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
+ * @dev: the network device
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Set the RX interrupt hold-off parameters for a network device.
+ */
+static int set_rx_intr_params(struct net_device *dev,
+			      unsigned int us, unsigned int cnt)
+{
+	int i, err;
+	struct port_info *pi = netdev_priv(dev);
 	struct adapter *adap = pi->adapter;
-	struct sge_rspq *q;
-	int i;
-	int r = 0;
-
-	for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
-		q = &adap->sge.ethrxq[i].rspq;
-		r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
-			c->rx_max_coalesced_frames);
-		if (r) {
-			dev_err(&dev->dev, "failed to set coalesce %d\n", r);
-			break;
-		}
-	}
-	return r;
+	struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+	for (i = 0; i < pi->nqsets; i++, q++) {
+		err = set_rspq_intr_params(&q->rspq, us, cnt);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	return set_rx_intr_params(dev, c->rx_coalesce_usecs,
+				  c->rx_max_coalesced_frames);
 }

 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -3392,6 +3438,77 @@ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
 }
 EXPORT_SYMBOL(cxgb4_best_mtu);

+/**
+ *	cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
+ *	@mtus: the HW MTU table
+ *	@header_size: Header Size
+ *	@data_size_max: maximum Data Segment Size
+ *	@data_size_align: desired Data Segment Size Alignment (2^N)
+ *	@mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
+ *
+ *	Similar to cxgb4_best_mtu() but instead of searching the Hardware
+ *	MTU Table based solely on a Maximum MTU parameter, we break that
+ *	parameter up into a Header Size and Maximum Data Segment Size, and
+ *	provide a desired Data Segment Size Alignment.  If we find an MTU in
+ *	the Hardware MTU Table which will result in a Data Segment Size with
+ *	the requested alignment _and_ that MTU isn't "too far" from the
+ *	closest MTU, then we'll return that rather than the closest MTU.
+ */
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+				    unsigned short header_size,
+				    unsigned short data_size_max,
+				    unsigned short data_size_align,
+				    unsigned int *mtu_idxp)
+{
+	unsigned short max_mtu = header_size + data_size_max;
+	unsigned short data_size_align_mask = data_size_align - 1;
+	int mtu_idx, aligned_mtu_idx;
+
+	/* Scan the MTU Table till we find an MTU which is larger than our
+	 * Maximum MTU or we reach the end of the table.  Along the way,
+	 * record the last MTU found, if any, which will result in a Data
+	 * Segment Length matching the requested alignment.
+	 */
+	for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
+		unsigned short data_size = mtus[mtu_idx] - header_size;
+
+		/* If this MTU minus the Header Size would result in a
+		 * Data Segment Size of the desired alignment, remember it.
+		 */
+		if ((data_size & data_size_align_mask) == 0)
+			aligned_mtu_idx = mtu_idx;
+
+		/* If we're not at the end of the Hardware MTU Table and the
+		 * next element is larger than our Maximum MTU, drop out of
+		 * the loop.
+		 */
+		if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
+			break;
+	}
+
+	/* If we fell out of the loop because we ran to the end of the table,
+	 * then we just have to use the last [largest] entry.
+	 */
+	if (mtu_idx == NMTUS)
+		mtu_idx--;
+
+	/* If we found an MTU which resulted in the requested Data Segment
+	 * Length alignment and that's "not far" from the largest MTU which is
+	 * less than or equal to the maximum MTU, then use that.
+	 */
+	if (aligned_mtu_idx >= 0 &&
+	    mtu_idx - aligned_mtu_idx <= 1)
+		mtu_idx = aligned_mtu_idx;
+
+	/* If the caller has passed in an MTU Index pointer, pass the
+	 * MTU Index back.  Return the MTU value.
+	 */
+	if (mtu_idxp)
+		*mtu_idxp = mtu_idx;
+	return mtus[mtu_idx];
+}
+EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
+
 /**
 *	cxgb4_port_chan - get the HW channel of a port
 *	@dev: the net device for the port
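A stand-alone trace of this search makes the trade-off concrete: with 40 bytes of headers, a 1460-byte data ceiling and 8-byte alignment, the scan prefers 1488 over the closer 1500 because 1488 - 40 = 1448 is a multiple of 8. A user-space re-run of the same logic against a sample table; the table values are illustrative, not firmware defaults:

#include <stdio.h>

#define NMTUS 16        /* size of the hardware MTU table */

/* User-space copy of the cxgb4_best_aligned_mtu() scan. */
static unsigned int best_aligned_mtu(const unsigned short *mtus,
                                     unsigned short header_size,
                                     unsigned short data_size_max,
                                     unsigned short data_size_align,
                                     unsigned int *mtu_idxp)
{
        unsigned short max_mtu = header_size + data_size_max;
        unsigned short mask = data_size_align - 1;
        int mtu_idx, aligned_mtu_idx = -1;

        for (mtu_idx = 0; mtu_idx < NMTUS; mtu_idx++) {
                unsigned short data_size = mtus[mtu_idx] - header_size;

                if ((data_size & mask) == 0)
                        aligned_mtu_idx = mtu_idx;
                if (mtu_idx + 1 < NMTUS && mtus[mtu_idx + 1] > max_mtu)
                        break;
        }
        if (mtu_idx == NMTUS)
                mtu_idx--;
        if (aligned_mtu_idx >= 0 && mtu_idx - aligned_mtu_idx <= 1)
                mtu_idx = aligned_mtu_idx;
        if (mtu_idxp)
                *mtu_idxp = mtu_idx;
        return mtus[mtu_idx];
}

int main(void)
{
        static const unsigned short mtus[NMTUS] = {
                88, 256, 512, 576, 808, 1024, 1280, 1488,
                1500, 2002, 2048, 4096, 4352, 8192, 9000, 9600
        };
        unsigned int idx;
        unsigned int mtu = best_aligned_mtu(mtus, 40, 1460, 8, &idx);

        /* Prints "mtu 1488 idx 7": 1488 is within one slot of the
         * closest fit (1500) and leaves an 8-aligned 1448-byte payload. */
        printf("mtu %u idx %u\n", mtu, idx);
        return 0;
}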
@@ -3789,7 +3906,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
 	lli.mtus = adap->params.mtus;
 	if (uld == CXGB4_ULD_RDMA) {
 		lli.rxq_ids = adap->sge.rdma_rxq;
+		lli.ciq_ids = adap->sge.rdma_ciq;
 		lli.nrxq = adap->sge.rdmaqs;
+		lli.nciq = adap->sge.rdmaciqs;
 	} else if (uld == CXGB4_ULD_ISCSI) {
 		lli.rxq_ids = adap->sge.ofld_rxq;
 		lli.nrxq = adap->sge.ofldqsets;
@@ -5535,13 +5654,41 @@ static int adap_init0(struct adapter *adap)
 #undef FW_PARAM_PFVF
 #undef FW_PARAM_DEV

-	/*
-	 * These are finalized by FW initialization, load their values now.
+	/* The MTU/MSS Table is initialized by now, so load their values.  If
+	 * we're initializing the adapter, then we'll make any modifications
+	 * we want to the MTU/MSS Table and also initialize the congestion
+	 * parameters.
 	 */
 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+	if (state != DEV_STATE_INIT) {
+		int i;
+
+		/* The default MTU Table contains values 1492 and 1500.
+		 * However, for TCP, it's better to have two values which are
+		 * a multiple of 8 +/- 4 bytes apart near this popular MTU.
+		 * This allows us to have a TCP Data Payload which is a
+		 * multiple of 8 regardless of what combination of TCP Options
+		 * are in use (always a multiple of 4 bytes) which is
+		 * important for performance reasons.  For instance, if no
+		 * options are in use, then we have a 20-byte IP header and a
+		 * 20-byte TCP header.  In this case, a 1500-byte MSS would
+		 * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
+		 * which is not a multiple of 8.  So using an MSS of 1488 in
+		 * this case results in a TCP Data Payload of 1448 bytes which
+		 * is a multiple of 8.  On the other hand, if 12-byte TCP Time
+		 * Stamps have been negotiated, then an MTU of 1500 bytes
+		 * results in a TCP Data Payload of 1448 bytes which, as
+		 * above, is a multiple of 8 bytes ...
+		 */
+		for (i = 0; i < NMTUS; i++)
+			if (adap->params.mtus[i] == 1492) {
+				adap->params.mtus[i] = 1488;
+				break;
+			}
+
+		t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+			     adap->params.b_wnd);
+	}
 	t4_init_tp_params(adap);
 	adap->flags |= FW_OK;
 	return 0;
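The long comment above is easy to verify with a trivial check; payload = MTU - 40 for bare IPv4+TCP, or MTU - 52 with the 12-byte timestamp option:

#include <stdio.h>

int main(void)
{
        /* The table swap 1492 -> 1488 keeps a multiple-of-8 payload
         * available whether or not timestamps are negotiated. */
        printf("1500-40=%d (mod 8 = %d)\n", 1500 - 40, (1500 - 40) % 8);
        printf("1488-40=%d (mod 8 = %d)\n", 1488 - 40, (1488 - 40) % 8);
        printf("1500-52=%d (mod 8 = %d)\n", 1500 - 52, (1500 - 52) % 8);
        return 0;
}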
@@ -5676,12 +5823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
 	       (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
 }

-static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+			     unsigned int us, unsigned int cnt,
 			     unsigned int size, unsigned int iqe_size)
 {
-	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
-			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
-	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+	q->adap = adap;
+	set_rspq_intr_params(q, us, cnt);
 	q->iqe_len = iqe_size;
 	q->size = size;
 }
@@ -5695,6 +5842,7 @@ static void cfg_queues(struct adapter *adap)
 {
 	struct sge *s = &adap->sge;
 	int i, q10g = 0, n10g = 0, qidx = 0;
+	int ciq_size;

 	for_each_port(adap, i)
 		n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -5733,12 +5881,13 @@ static void cfg_queues(struct adapter *adap)
 		s->ofldqsets = adap->params.nports;
 		/* For RDMA one Rx queue per channel suffices */
 		s->rdmaqs = adap->params.nports;
+		s->rdmaciqs = adap->params.nports;
 	}

 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
 		struct sge_eth_rxq *r = &s->ethrxq[i];

-		init_rspq(&r->rspq, 0, 0, 1024, 64);
+		init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
 		r->fl.size = 72;
 	}

@@ -5754,7 +5903,7 @@ static void cfg_queues(struct adapter *adap)
 	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
 		struct sge_ofld_rxq *r = &s->ofldrxq[i];

-		init_rspq(&r->rspq, 0, 0, 1024, 64);
+		init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
 		r->rspq.uld = CXGB4_ULD_ISCSI;
 		r->fl.size = 72;
 	}
@@ -5762,13 +5911,26 @@ static void cfg_queues(struct adapter *adap)
 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
 		struct sge_ofld_rxq *r = &s->rdmarxq[i];

-		init_rspq(&r->rspq, 0, 0, 511, 64);
+		init_rspq(adap, &r->rspq, 5, 1, 511, 64);
 		r->rspq.uld = CXGB4_ULD_RDMA;
 		r->fl.size = 72;
 	}

-	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
-	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+	if (ciq_size > SGE_MAX_IQ_SIZE) {
+		CH_WARN(adap, "CIQ size too small for available IQs\n");
+		ciq_size = SGE_MAX_IQ_SIZE;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
+		struct sge_ofld_rxq *r = &s->rdmaciq[i];
+
+		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
+		r->rspq.uld = CXGB4_ULD_RDMA;
+	}
+
+	init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
+	init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
 }

 /*
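The CIQ depth follows the resources it aggregates: slack plus the CQ address space plus the filter-region TIDs, capped at the SGE limit added in t4_hw.h below. In outline, with made-up resource counts:

#include <stdio.h>

#define SGE_MAX_IQ_SIZE 65520   /* cap added in t4_hw.h below */

/* CIQ sizing as in cfg_queues(): 64 entries of slack + CQ space +
 * nftids, clamped to what one SGE ingress queue can hold. */
static int ciq_size(unsigned int cq_size, unsigned int nftids)
{
        int size = 64 + cq_size + nftids;

        if (size > SGE_MAX_IQ_SIZE)
                size = SGE_MAX_IQ_SIZE; /* and warn, in the driver */
        return size;
}

int main(void)
{
        printf("%d %d\n", ciq_size(16384, 496), ciq_size(131072, 496));
        return 0;
}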
@@ -5815,9 +5977,9 @@ static int enable_msix(struct adapter *adap)

 	want = s->max_ethqsets + EXTRA_VECS;
 	if (is_offload(adap)) {
-		want += s->rdmaqs + s->ofldqsets;
+		want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
 		/* need nchan for each possible ULD */
-		ofld_need = 2 * nchan;
+		ofld_need = 3 * nchan;
 	}
 	need = adap->params.nports + EXTRA_VECS + ofld_need;

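The MSI-X budget grows accordingly once the CIQs are counted. A quick model of the new want/need arithmetic; all queue counts and the EXTRA_VECS value below are illustrative assumptions, not taken from this diff:

#include <stdio.h>

#define EXTRA_VECS 2    /* assumed: non-data vectors (e.g. FW event queue) */

/* MSI-X vectors requested by enable_msix() after this series: the
 * rdmaciqs term is new, and the fallback floor grows from 2 to 3 ULD
 * queues per channel. */
int main(void)
{
        int max_ethqsets = 8, rdmaqs = 4, rdmaciqs = 4, ofldqsets = 4;
        int nports = 4, nchan = 4;

        int want = max_ethqsets + EXTRA_VECS + rdmaqs + rdmaciqs + ofldqsets;
        int need = nports + EXTRA_VECS + 3 * nchan;     /* was 2 * nchan */

        printf("want %d, need %d\n", want, need);
        return 0;
}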
@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
 	const struct cxgb4_virt_res *vr;     /* assorted HW resources */
 	const unsigned short *mtus;          /* MTU table */
 	const unsigned short *rxq_ids;       /* the ULD's Rx queue ids */
+	const unsigned short *ciq_ids;       /* the ULD's concentrator IQ ids */
 	unsigned short nrxq;                 /* # of Rx queues */
 	unsigned short ntxq;                 /* # of Tx queues */
+	unsigned short nciq;                 /* # of concentrator IQ */
 	unsigned char nchan:4;               /* # of channels */
 	unsigned char nports:4;              /* # of ports */
 	unsigned char wr_cred;               /* WR 16-byte credits */
@@ -274,6 +276,11 @@ unsigned int cxgb4_port_viid(const struct net_device *dev);
 unsigned int cxgb4_port_idx(const struct net_device *dev);
 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
 			    unsigned int *idx);
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+				    unsigned short header_size,
+				    unsigned short data_size_max,
+				    unsigned short data_size_align,
+				    unsigned int *mtu_idxp);
 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
 			 struct tp_tcp_stats *v6);
 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
@@ -2215,7 +2215,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 	iq->cntxt_id = ntohs(c.iqid);
 	iq->abs_id = ntohs(c.physiqid);
 	iq->size--;                           /* subtract status entry */
-	iq->adap = adap;
 	iq->netdev = dev;
 	iq->handler = hnd;

@@ -2515,6 +2514,10 @@ void t4_free_sge_resources(struct adapter *adap)
 		if (oq->rspq.desc)
 			free_rspq_fl(adap, &oq->rspq, &oq->fl);
 	}
+	for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
+		if (oq->rspq.desc)
+			free_rspq_fl(adap, &oq->rspq, &oq->fl);
+	}

 	/* clean up offload Tx queues */
 	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
@@ -68,6 +68,7 @@ enum {
 	SGE_MAX_WR_LEN = 512,     /* max WR size in bytes */
 	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
 	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
+	SGE_MAX_IQ_SIZE = 65520,

 	SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
 	SGE_TIMER_UPD_CIDX = 7,   /* update cidx only */
@@ -227,6 +227,7 @@ struct cpl_pass_open_req {
 #define DELACK(x)     ((x) << 5)
 #define ULP_MODE(x)   ((x) << 8)
 #define RCV_BUFSIZ(x) ((x) << 12)
+#define RCV_BUFSIZ_MASK 0x3FFU
 #define DSCP(x)       ((x) << 22)
 #define SMAC_SEL(x)   ((u64)(x) << 28)
 #define L2T_IDX(x)    ((u64)(x) << 36)
@@ -278,6 +279,15 @@ struct cpl_pass_accept_rpl {
 	__be64 opt0;
 };

+struct cpl_t5_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+	__be64 opt0;
+	__be32 iss;
+	__be32 rsvd;
+};
+
 struct cpl_act_open_req {
 	WR_HDR;
 	union opcode_tid ot;