net: hns3: add handling for big TX fragment
This patch unifies big TX fragment handling for the TSO and non-TSO cases.

Signed-off-by: Fuyun Liang <liangfuyun1@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 5188f218fc
commit 1e8a7977d0
@@ -985,10 +985,13 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	u32 ol_type_vlan_len_msec = 0;
 	u16 bdtp_fe_sc_vld_ra_ri = 0;
 	struct skb_frag_struct *frag;
+	unsigned int frag_buf_num;
 	u32 type_cs_vlan_tso = 0;
 	struct sk_buff *skb;
 	u16 inner_vtag = 0;
 	u16 out_vtag = 0;
+	unsigned int k;
+	int sizeoflast;
 	u32 paylen = 0;
 	dma_addr_t dma;
 	u16 mss = 0;
@@ -996,16 +999,6 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	u8 il4_proto;
 	int ret;
 
-	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
-	desc_cb->priv = priv;
-	desc_cb->length = size;
-	desc_cb->type = type;
-
-	/* now, fill the descriptor */
-	desc->tx.send_size = cpu_to_le16((u16)size);
-	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
-	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
-
 	if (type == DESC_TYPE_SKB) {
 		skb = (struct sk_buff *)priv;
 		paylen = skb->len;
@@ -1058,12 +1051,36 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		return -ENOMEM;
 	}
 
-	desc_cb->dma = dma;
-	desc->addr = cpu_to_le64(dma);
+	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
+	sizeoflast = size % HNS3_MAX_BD_SIZE;
+	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
 
-	/* move ring pointer to next.*/
-	ring_ptr_move_fw(ring, next_to_use);
+	/* When frag size is bigger than hardware limit, split this frag */
+	for (k = 0; k < frag_buf_num; k++) {
+		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
+		desc_cb->priv = priv;
+		desc_cb->length = (k == frag_buf_num - 1) ?
+					sizeoflast : HNS3_MAX_BD_SIZE;
+		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
+		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
+					DESC_TYPE_SKB : DESC_TYPE_PAGE;
+
+		/* now, fill the descriptor */
+		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
+		desc->tx.send_size = cpu_to_le16((u16)desc_cb->length);
+		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
+				       frag_end && (k == frag_buf_num - 1) ?
				       1 : 0);
+		desc->tx.bdtp_fe_sc_vld_ra_ri =
+				cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+		/* move ring pointer to next.*/
+		ring_ptr_move_fw(ring, next_to_use);
+
+		desc_cb = &ring->desc_cb[ring->next_to_use];
+		desc = &ring->desc[ring->next_to_use];
+	}
 
 	return 0;
 }
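For reference, below is a minimal stand-alone sketch (not part of the patch) of the splitting arithmetic the new loop performs. HNS3_MAX_BD_SIZE is assumed here to be 65535, i.e. the largest length one TX buffer descriptor can carry; the definition in hns3_enet.h is authoritative, and the 150000-byte fragment size is just an example.

	/* Sketch of how a large fragment is split across buffer descriptors.
	 * Assumption: HNS3_MAX_BD_SIZE == 65535 (per-BD hardware limit).
	 */
	#include <stdio.h>

	#define HNS3_MAX_BD_SIZE 65535U

	int main(void)
	{
		unsigned int size = 150000;	/* example fragment length in bytes */
		unsigned int frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
		unsigned int sizeoflast = size % HNS3_MAX_BD_SIZE;
		unsigned int k;

		sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

		/* Each iteration models one TX buffer descriptor (BD):
		 * full-size BDs first, the remainder in the last one.
		 */
		for (k = 0; k < frag_buf_num; k++) {
			unsigned int len = (k == frag_buf_num - 1) ?
					   sizeoflast : HNS3_MAX_BD_SIZE;

			printf("BD %u: offset %u, length %u\n",
			       k, k * HNS3_MAX_BD_SIZE, len);
		}

		return 0;
	}

With these numbers the sketch prints three BDs of 65535, 65535 and 18930 bytes. When size is at most HNS3_MAX_BD_SIZE, frag_buf_num is 1 and the loop reduces to the single-descriptor fill removed in the second hunk, which is how the same path now serves both small and oversized fragments.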