mlx5-fixes-2020-02-06
-----BEGIN PGP SIGNATURE-----

iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl48dfgACgkQSD+KveBX
+j4c5ggAsh46tNmBzFaTIqu9U+N7KGbos73/x/mSZOjLQ41sAEau4mgxAiejgbs5
qU/edgQ19FyrKj3o1tssEXR3OgOrcPyrOU9FPnfE+Ok9AlDevvFd/bkl2fdrDYbx
LHtKBaJUMbytJWWcChCErdsq5qIzLkBrCqEpr5D2E9tNnSfvgmAmYpwylF7d8+KR
ux3+m9hyIVT7zCkFrBGgqyfrPhILV4Al6azVfJpY9TIPekzalQlcuPEMI/8OcHCy
AKOYoXwpCY4biAF35gUpRb6r5jhxiNI0Nsr7MQmaSd7vA8dlW7ENlyxNfnKB+2KT
u2OTrGSgqgZx9+WxWoPmokuyqvDJjg==
=jiJ0
-----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2020-02-06' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2020-02-06

This series introduces some fixes to mlx5 driver.

Please pull and let me know if there is any problem.

For -stable v4.19:
 ('net/mlx5: IPsec, Fix esp modify function attribute')
 ('net/mlx5: IPsec, fix memory leak at mlx5_fpga_ipsec_delete_sa_ctx')

For -stable v5.4:
 ('net/mlx5: Deprecate usage of generic TLS HW capability bit')
 ('net/mlx5: Fix deadlock in fs_core')

For -stable v5.5:
 ('net/mlx5e: TX, Error completion is for last WQE in batch')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit f798a5a0a6
@@ -45,7 +45,7 @@ void mlx5_ktls_destroy_key(struct mlx5_core_dev *mdev, u32 key_id);
 
 static inline bool mlx5_accel_is_ktls_device(struct mlx5_core_dev *mdev)
 {
-        if (!MLX5_CAP_GEN(mdev, tls))
+        if (!MLX5_CAP_GEN(mdev, tls_tx))
                 return false;
 
         if (!MLX5_CAP_GEN(mdev, log_max_dek))
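The generic tls HCA capability bit is being deprecated in favour of the TX-specific tls_tx bit, so every kTLS gate has to read the new field. A minimal, self-contained C sketch of the two-stage gate this hunk adjusts (illustrative struct and names only, not the driver header, which reads the bits with MLX5_CAP_GEN()):

#include <stdbool.h>
#include <stdio.h>

/* Illustrative capability snapshot; the real driver queries these bits from
 * the HCA capability pages. */
struct hca_caps {
        bool tls_tx;              /* TX-specific TLS offload bit (replaces 'tls') */
        unsigned int log_max_dek; /* 0 means no DEK (key) objects available */
};

static bool ktls_device_supported(const struct hca_caps *caps)
{
        if (!caps->tls_tx)        /* feature not advertised for TX */
                return false;
        if (!caps->log_max_dek)   /* no key objects -> cannot install TLS keys */
                return false;
        return true;
}

int main(void)
{
        struct hca_caps caps = { .tls_tx = true, .log_max_dek = 8 };

        printf("ktls supported: %d\n", ktls_device_supported(&caps));
        return 0;
}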
@@ -269,7 +269,7 @@ struct sk_buff *mlx5e_tls_handle_tx_skb(struct net_device *netdev,
         int datalen;
         u32 skb_seq;
 
-        if (MLX5_CAP_GEN(sq->channel->mdev, tls)) {
+        if (MLX5_CAP_GEN(sq->channel->mdev, tls_tx)) {
                 skb = mlx5e_ktls_handle_tx_skb(netdev, sq, skb, wqe, pi);
                 goto out;
         }
@@ -613,13 +613,6 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
 
                 wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-                if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
-                        netdev_WARN_ONCE(cq->channel->netdev,
-                                         "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe));
-                        if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
-                                queue_work(cq->channel->priv->wq, &sq->recover_work);
-                        break;
-                }
                 do {
                         struct mlx5e_sq_wqe_info *wi;
                         u16 ci;
@@ -629,6 +622,15 @@ void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
                         ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
                         wi = &sq->db.ico_wqe[ci];
 
+                        if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
+                                netdev_WARN_ONCE(cq->channel->netdev,
+                                                 "Bad OP in ICOSQ CQE: 0x%x\n",
+                                                 get_cqe_opcode(cqe));
+                                if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
+                                        queue_work(cq->channel->priv->wq, &sq->recover_work);
+                                break;
+                        }
+
                         if (likely(wi->opcode == MLX5_OPCODE_UMR)) {
                                 sqcc += MLX5E_UMR_WQEBBS;
                                 wi->umr.rq->mpwqe.umr_completed++;
@@ -451,34 +451,17 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 
         i = 0;
         do {
+                struct mlx5e_tx_wqe_info *wi;
                 u16 wqe_counter;
                 bool last_wqe;
+                u16 ci;
 
                 mlx5_cqwq_pop(&cq->wq);
 
                 wqe_counter = be16_to_cpu(cqe->wqe_counter);
 
-                if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
-                        if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
-                                              &sq->state)) {
-                                struct mlx5e_tx_wqe_info *wi;
-                                u16 ci;
-
-                                ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
-                                wi = &sq->db.wqe_info[ci];
-                                mlx5e_dump_error_cqe(sq,
-                                                     (struct mlx5_err_cqe *)cqe);
-                                mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
-                                queue_work(cq->channel->priv->wq,
-                                           &sq->recover_work);
-                        }
-                        stats->cqe_err++;
-                }
-
                 do {
-                        struct mlx5e_tx_wqe_info *wi;
                         struct sk_buff *skb;
-                        u16 ci;
                         int j;
 
                         last_wqe = (sqcc == wqe_counter);
@@ -516,6 +499,18 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
                         napi_consume_skb(skb, napi_budget);
                 } while (!last_wqe);
 
+                if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) {
+                        if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING,
+                                              &sq->state)) {
+                                mlx5e_dump_error_cqe(sq,
+                                                     (struct mlx5_err_cqe *)cqe);
+                                mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
+                                queue_work(cq->channel->priv->wq,
+                                           &sq->recover_work);
+                        }
+                        stats->cqe_err++;
+                }
+
         } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
 
         stats->cqes += i;
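The two hunks above move the error handling after the WQE-reclaim loop: a CQE, including an error CQE, completes a whole batch, and its wqe_counter names the last WQE of that batch, so the batch has to be unwound before the error is reported against that last WQE. A small self-contained C sketch of the ordering (illustrative ring and counter types, not the driver code):

#include <stdbool.h>
#include <stdio.h>

#define RING_SZ 8

struct wqe_info { int num_bbs; };     /* size of each posted WQE, in basic blocks */

/* Process one completion: reclaim every WQE in the batch first, then report a
 * possible error against the *last* WQE the CQE points at. Returns the new cc. */
static int handle_cqe(const struct wqe_info ring[RING_SZ],
                      int cc, int wqe_counter, bool is_error)
{
        int ci;
        bool last_wqe;

        do {
                ci = cc % RING_SZ;            /* index of the WQE being reclaimed */
                last_wqe = (cc == wqe_counter);
                cc += ring[ci].num_bbs;       /* consume it */
        } while (!last_wqe);

        if (is_error)                         /* the error belongs to ring[ci], the last one */
                fprintf(stderr, "error completion on WQE index %d\n", ci);

        return cc;
}

int main(void)
{
        struct wqe_info ring[RING_SZ] = { {1}, {2}, {1}, {1}, {1}, {1}, {1}, {1} };

        /* batch of three WQEs starting at cc=0; the CQE's counter points at the last (3) */
        int cc = handle_cqe(ring, 0, 3, true);

        printf("consumer counter advanced to %d\n", cc);
        return 0;
}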
@@ -850,6 +850,7 @@ void mlx5_fpga_ipsec_delete_sa_ctx(void *context)
         mutex_lock(&fpga_xfrm->lock);
         if (!--fpga_xfrm->num_rules) {
                 mlx5_fpga_ipsec_release_sa_ctx(fpga_xfrm->sa_ctx);
+                kfree(fpga_xfrm->sa_ctx);
                 fpga_xfrm->sa_ctx = NULL;
         }
         mutex_unlock(&fpga_xfrm->lock);
@@ -1478,7 +1479,7 @@ int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm,
         if (!memcmp(&xfrm->attrs, attrs, sizeof(xfrm->attrs)))
                 return 0;
 
-        if (!mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
+        if (mlx5_fpga_esp_validate_xfrm_attrs(mdev, attrs)) {
                 mlx5_core_warn(mdev, "Tried to create an esp with unsupported attrs\n");
                 return -EOPNOTSUPP;
         }
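Two fixes land here: the delete path now kfree()s the host-side sa_ctx it previously leaked (releasing the hardware context alone), and the modify path drops the inverted check, consistent with the validation helper following the usual kernel 0-on-success / negative-errno convention. A tiny self-contained C sketch of that calling convention (hypothetical validate_attrs(), not the driver function):

#include <errno.h>
#include <stdio.h>

/* Hypothetical validator following the kernel convention:
 * return 0 when the attributes are supported, -EOPNOTSUPP otherwise. */
static int validate_attrs(int flags)
{
        return (flags & ~0x3) ? -EOPNOTSUPP : 0;
}

static int modify_xfrm(int flags)
{
        if (validate_attrs(flags)) {   /* non-zero means "unsupported" */
                fprintf(stderr, "unsupported attrs\n");
                return -EOPNOTSUPP;
        }
        /* ... apply the new attributes ... */
        return 0;
}

int main(void)
{
        printf("modify(0x1) -> %d\n", modify_xfrm(0x1));  /* 0: accepted */
        printf("modify(0x8) -> %d\n", modify_xfrm(0x8));  /* negative errno: rejected */
        return 0;
}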
@@ -1582,16 +1582,16 @@ struct match_list_head {
         struct match_list first;
 };
 
-static void free_match_list(struct match_list_head *head)
+static void free_match_list(struct match_list_head *head, bool ft_locked)
 {
         if (!list_empty(&head->list)) {
                 struct match_list *iter, *match_tmp;
 
                 list_del(&head->first.list);
-                tree_put_node(&head->first.g->node, false);
+                tree_put_node(&head->first.g->node, ft_locked);
                 list_for_each_entry_safe(iter, match_tmp, &head->list,
                                          list) {
-                        tree_put_node(&iter->g->node, false);
+                        tree_put_node(&iter->g->node, ft_locked);
                         list_del(&iter->list);
                         kfree(iter);
                 }
@@ -1600,7 +1600,8 @@ static void free_match_list(struct match_list_head *head)
 
 static int build_match_list(struct match_list_head *match_head,
                             struct mlx5_flow_table *ft,
-                            const struct mlx5_flow_spec *spec)
+                            const struct mlx5_flow_spec *spec,
+                            bool ft_locked)
 {
         struct rhlist_head *tmp, *list;
         struct mlx5_flow_group *g;
@@ -1625,7 +1626,7 @@ static int build_match_list(struct match_list_head *match_head,
 
                 curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
                 if (!curr_match) {
-                        free_match_list(match_head);
+                        free_match_list(match_head, ft_locked);
                         err = -ENOMEM;
                         goto out;
                 }
@@ -1805,7 +1806,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
         version = atomic_read(&ft->node.version);
 
         /* Collect all fgs which has a matching match_criteria */
-        err = build_match_list(&match_head, ft, spec);
+        err = build_match_list(&match_head, ft, spec, take_write);
         if (err) {
                 if (take_write)
                         up_write_ref_node(&ft->node, false);
@@ -1819,7 +1820,7 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 
         rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
                                       dest_num, version);
-        free_match_list(&match_head);
+        free_match_list(&match_head, take_write);
         if (!IS_ERR(rule) ||
             (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN)) {
                 if (take_write)
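The new ft_locked parameter threads "the caller already holds the flow table lock" (take_write) down through build_match_list()/free_match_list() to tree_put_node(), so tearing down the candidate list never tries to take a lock its caller is still holding, which is the deadlock this fix removes. A compact self-contained C sketch of the pattern (pthread mutex stand-in, not fs_core itself):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct table {
        pthread_mutex_t lock;
        int nelems;
};

/* Drop one element; only take the table lock if the caller does not hold it. */
static void put_entry(struct table *t, bool table_locked)
{
        if (!table_locked)
                pthread_mutex_lock(&t->lock);
        t->nelems--;                      /* unlink work that needs the lock */
        if (!table_locked)
                pthread_mutex_unlock(&t->lock);
}

/* Caller that may already hold the write lock (mirrors take_write). */
static void add_flow(struct table *t, bool take_write)
{
        if (take_write)
                pthread_mutex_lock(&t->lock);
        /* ... build the candidate list, possibly fail and tear it down ... */
        put_entry(t, take_write);         /* forward "do we hold the lock?" */
        if (take_write)
                pthread_mutex_unlock(&t->lock);
}

int main(void)
{
        struct table t = { .lock = PTHREAD_MUTEX_INITIALIZER, .nelems = 1 };

        add_flow(&t, true);               /* would self-deadlock without the flag */
        printf("nelems = %d\n", t.nelems);
        return 0;
}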
@@ -242,7 +242,7 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
                         return err;
         }
 
-        if (MLX5_CAP_GEN(dev, tls)) {
+        if (MLX5_CAP_GEN(dev, tls_tx)) {
                 err = mlx5_core_get_caps(dev, MLX5_CAP_TLS);
                 if (err)
                         return err;
@@ -1448,14 +1448,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 
         u8         reserved_at_440[0x20];
 
-        u8         tls[0x1];
-        u8         reserved_at_461[0x2];
+        u8         reserved_at_460[0x3];
         u8         log_max_uctx[0x5];
         u8         reserved_at_468[0x3];
         u8         log_max_umem[0x5];
         u8         max_num_eqs[0x10];
 
-        u8         reserved_at_480[0x3];
+        u8         reserved_at_480[0x1];
+        u8         tls_tx[0x1];
+        u8         reserved_at_482[0x1];
         u8         log_max_l2_table[0x5];
         u8         reserved_at_488[0x8];
         u8         log_uar_page_sz[0x10];
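In mlx5_ifc.h every reserved_at_NNN name encodes the field's starting bit offset, so this edit only renames bits: tls[0x1] plus reserved_at_461[0x2] collapse into reserved_at_460[0x3], and reserved_at_480[0x3] is split so tls_tx sits at bit 0x481, leaving every other offset and the overall layout untouched. A short self-contained C check of that width bookkeeping (plain arithmetic over the widths shown in the hunk, not kernel code):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        /* widths (in bits) of the new-layout fields between offsets 0x460 and 0x488 */
        int new_460 = 0x3 /* reserved_at_460 */ + 0x5 /* log_max_uctx */ +
                      0x3 /* reserved_at_468 */ + 0x5 /* log_max_umem */ +
                      0x10 /* max_num_eqs */;
        int new_480 = 0x1 /* reserved_at_480 */ + 0x1 /* tls_tx */ +
                      0x1 /* reserved_at_482 */ + 0x5 /* log_max_l2_table */;

        assert(new_460 == 0x480 - 0x460);  /* fields fill 0x460..0x47f exactly */
        assert(new_480 == 0x488 - 0x480);  /* fields fill 0x480..0x487 exactly */

        printf("bit layout widths are consistent\n");
        return 0;
}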