net: hns3: Fixes ring-to-vector map-and-unmap command

This patch fixes the ring-to-vector map and unmap commands and adds
the INT_GL (Gap Limiting interrupt) index and the VF id to them, as
required by the hardware interface.
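
Concretely, each 16-bit tqp_type_and_id element now carries the interrupt/ring
type in bits 0-1, the TQP (queue) index in bits 2-12, and the new INT_GL index
in bits 13-14, which is why the TQP id mask shrinks from 0x3fff to 0x7ff in the
header hunk below. The following is a minimal, self-contained sketch of that
packing using plain shift-and-mask arithmetic; the pack_tqp_entry() helper is
hypothetical and only mirrors what the driver does with its hnae_set_field()
macro:

#include <stdint.h>

/* Bit layout of one tqp_type_and_id element after this patch
 * (mirrors HCLGE_INT_TYPE_*, HCLGE_TQP_ID_* and HCLGE_INT_GL_IDX_*).
 */
#define INT_TYPE_S	0
#define INT_TYPE_M	(0x3 << INT_TYPE_S)	/* bits 0-1: interrupt/ring type */
#define TQP_ID_S	2
#define TQP_ID_M	(0x7ff << TQP_ID_S)	/* bits 2-12: queue (TQP) index  */
#define INT_GL_IDX_S	13
#define INT_GL_IDX_M	(0x3 << INT_GL_IDX_S)	/* bits 13-14: GL index          */

/* Hypothetical helper: pack one ring-to-vector entry in CPU byte order;
 * the driver converts the result with cpu_to_le16() before sending it.
 */
static uint16_t pack_tqp_entry(unsigned int int_type,
			       unsigned int tqp_index,
			       unsigned int gl_idx)
{
	uint16_t entry = 0;

	entry |= (int_type << INT_TYPE_S) & INT_TYPE_M;
	entry |= (tqp_index << TQP_ID_S) & TQP_ID_M;
	entry |= (gl_idx << INT_GL_IDX_S) & INT_GL_IDX_M;

	return entry;
}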

Fixes: 6427264ef330 ("net: hns3: Add HNS3 Acceleration Engine & Compatibility Layer Support")
Signed-off-by: Lipeng <lipeng321@huawei.com>
Signed-off-by: Mingguang Qu <qumingguang@huawei.com>
Signed-off-by: Salil Mehta <salil.mehta@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 0305b443a3 (parent c5b1b97522)
Lipeng, 2017-09-19 17:17:12 +01:00; committed by David S. Miller
2 changed files with 14 additions and 2 deletions


@@ -238,7 +238,7 @@ struct hclge_tqp_map {
 	u8 rsv[18];
 };
 
-#define HCLGE_VECTOR_ELEMENTS_PER_CMD	11
+#define HCLGE_VECTOR_ELEMENTS_PER_CMD	10
 
 enum hclge_int_type {
 	HCLGE_INT_TX,
@@ -252,8 +252,12 @@ struct hclge_ctrl_vector_chain {
 #define HCLGE_INT_TYPE_S	0
 #define HCLGE_INT_TYPE_M	0x3
 #define HCLGE_TQP_ID_S		2
-#define HCLGE_TQP_ID_M		(0x3fff << HCLGE_TQP_ID_S)
+#define HCLGE_TQP_ID_M		(0x7ff << HCLGE_TQP_ID_S)
+#define HCLGE_INT_GL_IDX_S	13
+#define HCLGE_INT_GL_IDX_M	(0x3 << HCLGE_INT_GL_IDX_S)
 	__le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
+	u8 vfid;
+	u8 rsv;
 };
 
 #define HCLGE_TC_NUM		8
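
The element count drops from 11 to 10 because the new vfid and rsv bytes must
fit in the same fixed-size command payload. Assuming the descriptor data area
is 24 bytes (the arithmetic 1 + 1 + 11 * 2 = 24 before and 1 + 1 + 10 * 2 + 1 + 1 = 24
after suggests six 32-bit data words), the layout can be checked at compile time
with a sketch like the one below; the leading int_vector_id byte is an
assumption, while int_cause_num appears in the map/unmap hunks further down:

#include <assert.h>
#include <stdint.h>

#define VECTOR_ELEMENTS_PER_CMD	10
#define CMD_DATA_BYTES		24	/* assumed: six 32-bit data words per descriptor */

/* Userspace mirror of hclge_ctrl_vector_chain after this patch. */
struct ctrl_vector_chain {
	uint8_t  int_vector_id;		/* assumed leading field       */
	uint8_t  int_cause_num;		/* used in the map/unmap hunks */
	uint16_t tqp_type_and_id[VECTOR_ELEMENTS_PER_CMD];
	uint8_t  vfid;			/* new: VF id                  */
	uint8_t  rsv;			/* new: reserved byte          */
};

/* 1 + 1 + 10 * 2 + 1 + 1 == 24: vfid and rsv reuse the two bytes freed
 * by shrinking the element array from 11 to 10 entries.
 */
static_assert(sizeof(struct ctrl_vector_chain) == CMD_DATA_BYTES,
	      "ring-to-vector command payload size changed");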


@@ -2680,7 +2680,11 @@ int hclge_map_vport_ring_to_vector(struct hclge_vport *vport, int vector_id,
 			     hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
 		hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
 			       HCLGE_TQP_ID_S, node->tqp_index);
+		hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+			       HCLGE_INT_GL_IDX_S,
+			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
 		req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+		req->vfid = vport->vport_id;
 
 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
@@ -2764,8 +2768,12 @@ static int hclge_unmap_ring_from_vector(
 			     hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
 		hnae_set_field(req->tqp_type_and_id[i], HCLGE_TQP_ID_M,
 			       HCLGE_TQP_ID_S, node->tqp_index);
+		hnae_set_field(req->tqp_type_and_id[i], HCLGE_INT_GL_IDX_M,
+			       HCLGE_INT_GL_IDX_S,
+			       hnae_get_bit(node->flag, HNAE3_RING_TYPE_B));
 		req->tqp_type_and_id[i] = cpu_to_le16(req->tqp_type_and_id[i]);
+		req->vfid = vport->vport_id;
 
 		if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
 			req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
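
For reference, the hnae_set_bit()/hnae_set_field()/hnae_get_bit() helpers used
throughout both hunks are simple clear-then-or mask accessors. A sketch of
equivalent definitions is shown below; treat these as illustrative stand-ins for
the driver's own macros, which may differ in minor details:

/* Illustrative equivalents of the hns3 mask-and-shift helpers. */
#define hnae_set_field(origin, mask, shift, val) \
	do { \
		(origin) &= ~(mask); \
		(origin) |= ((val) << (shift)) & (mask); \
	} while (0)

#define hnae_set_bit(origin, shift, val) \
	hnae_set_field((origin), (0x1 << (shift)), (shift), (val))

#define hnae_get_field(origin, mask, shift) \
	(((origin) & (mask)) >> (shift))

#define hnae_get_bit(origin, shift) \
	hnae_get_field((origin), (0x1 << (shift)), (shift))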