mlx5-fixes-2020-05-22

-----BEGIN PGP SIGNATURE-----
 
 iQEzBAABCAAdFiEEGhZs6bAKwk/OTgTpSD+KveBX+j4FAl7IbksACgkQSD+KveBX
 +j5T8Af/XT6b23VlSn2Km4tg8WQNDRJLdq1s6fTS5SGcyc0awxfH07cvYvJ26kKW
 kmdDNijkVbd0ma2UxHiiD3vmE8Vs85gZ6BDNyl485x/cH3zFzAm54R5fZdnK5JgN
 YNgdFP0MOwPtAdDtxLH+r8aOyNKncIOmCZrMNnxVgI+IytG1L5QLnS6GeQy2zyIx
 9F/9sihta2z567IstGu2wvmgviSHVk/zV9yqn/orD9tV6oFvvrBQMlEt8l27b1tA
 4bajbHIyc1WmfQ+wg56eXATdbqCQ2YYfMjhchiCfFv5DhnMnPi5bV0PNR9Rq0CYw
 05xpF16/85uvDbTizsgGNZ1Pb1nGsQ==
 =oFWF
 -----END PGP SIGNATURE-----

Merge tag 'mlx5-fixes-2020-05-22' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2020-05-22

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.

For -stable v4.13
   ('net/mlx5: Add command entry handling completion')

For -stable v5.2
   ('net/mlx5: Fix error flow in case of function_setup failure')
   ('net/mlx5: Fix memory leak in mlx5_events_init')

For -stable v5.3
   ('net/mlx5e: Update netdev txq on completions during closure')
   ('net/mlx5e: kTLS, Destroy key object after destroying the TIS')
   ('net/mlx5e: Fix inner tirs handling')

For -stable v5.6
   ('net/mlx5: Fix cleaning unmanaged flow tables')
   ('net/mlx5: Fix a race when moving command interface to events mode')
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit e3181e9a72 by David S. Miller, 2020-05-23 16:39:45 -07:00.
16 changed files with 168 additions and 48 deletions.
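
The common thread of the command-interface fixes below is a pair of gates
checked before a command is handed to firmware: the interface must be in the
UP state, and during command-EQ bring-up only the EQ-creation opcode is
admitted. A minimal standalone sketch of that gating idea (all *_toy names
and the ALLOWED_ALL_TOY value are invented for illustration; the real checks
are in the cmd.c hunks below):

    #include <stdbool.h>
    #include <stdint.h>

    enum cmdif_state_toy { CMDIF_STATE_DOWN_TOY, CMDIF_STATE_UP_TOY };

    struct cmd_toy {
            enum cmdif_state_toy state; /* set UP only after cmdif init */
            uint16_t allowed_opcode;    /* ALLOWED_ALL_TOY outside bring-up */
    };

    #define ALLOWED_ALL_TOY 0xffff

    static bool opcode_allowed_toy(const struct cmd_toy *cmd, uint16_t opcode)
    {
            if (cmd->allowed_opcode == ALLOWED_ALL_TOY)
                    return true;
            return cmd->allowed_opcode == opcode;
    }

    static bool may_send_toy(const struct cmd_toy *cmd, uint16_t opcode)
    {
            /* if either gate fails, the driver synthesizes an error
             * instead of touching the hardware */
            return cmd->state == CMDIF_STATE_UP_TOY &&
                   opcode_allowed_toy(cmd, opcode);
    }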

drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@@ -848,6 +848,14 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
                               struct mlx5_cmd_msg *msg);
 
+static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
+{
+        if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
+                return true;
+
+        return cmd->allowed_opcode == opcode;
+}
+
 static void cmd_work_handler(struct work_struct *work)
 {
         struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -861,6 +869,7 @@ static void cmd_work_handler(struct work_struct *work)
         int alloc_ret;
         int cmd_mode;
 
+        complete(&ent->handling);
         sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
         down(sem);
         if (!ent->page_queue) {
@@ -913,7 +922,9 @@ static void cmd_work_handler(struct work_struct *work)
 
         /* Skip sending command to fw if internal error */
         if (pci_channel_offline(dev->pdev) ||
-            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
+            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+            cmd->state != MLX5_CMDIF_STATE_UP ||
+            !opcode_allowed(&dev->cmd, ent->op)) {
                 u8 status = 0;
                 u32 drv_synd;
 
@@ -978,6 +989,11 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
         struct mlx5_cmd *cmd = &dev->cmd;
         int err;
 
+        if (!wait_for_completion_timeout(&ent->handling, timeout) &&
+            cancel_work_sync(&ent->work)) {
+                ent->ret = -ECANCELED;
+                goto out_err;
+        }
         if (cmd->mode == CMD_MODE_POLLING || ent->polling) {
                 wait_for_completion(&ent->done);
         } else if (!wait_for_completion_timeout(&ent->done, timeout)) {
@@ -985,12 +1001,17 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
                 mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
         }
 
+out_err:
         err = ent->ret;
         if (err == -ETIMEDOUT) {
                 mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
                                mlx5_command_str(msg_to_opcode(ent->in)),
                                msg_to_opcode(ent->in));
+        } else if (err == -ECANCELED) {
+                mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
+                               mlx5_command_str(msg_to_opcode(ent->in)),
+                               msg_to_opcode(ent->in));
         }
 
         mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
                       err, deliv_status_to_str(ent->status), ent->status);
@@ -1026,6 +1047,7 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
         ent->token = token;
         ent->polling = force_polling;
 
+        init_completion(&ent->handling);
         if (!callback)
                 init_completion(&ent->done);
 
@@ -1045,6 +1067,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
         err = wait_func(dev, ent);
         if (err == -ETIMEDOUT)
                 goto out;
+        if (err == -ECANCELED)
+                goto out_free;
 
         ds = ent->ts2 - ent->ts1;
         op = MLX5_GET(mbox_in, in->first.data, opcode);
@@ -1391,6 +1415,22 @@ static void create_debugfs_files(struct mlx5_core_dev *dev)
         mlx5_cmdif_debugfs_init(dev);
 }
 
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
+{
+        struct mlx5_cmd *cmd = &dev->cmd;
+        int i;
+
+        for (i = 0; i < cmd->max_reg_cmds; i++)
+                down(&cmd->sem);
+        down(&cmd->pages_sem);
+
+        cmd->allowed_opcode = opcode;
+
+        up(&cmd->pages_sem);
+        for (i = 0; i < cmd->max_reg_cmds; i++)
+                up(&cmd->sem);
+}
+
 static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode)
 {
         struct mlx5_cmd *cmd = &dev->cmd;
@@ -1667,12 +1707,14 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
         int err;
         u8 status = 0;
         u32 drv_synd;
+        u16 opcode;
         u8 token;
 
+        opcode = MLX5_GET(mbox_in, in, opcode);
         if (pci_channel_offline(dev->pdev) ||
-            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
-                u16 opcode = MLX5_GET(mbox_in, in, opcode);
-
+            dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+            dev->cmd.state != MLX5_CMDIF_STATE_UP ||
+            !opcode_allowed(&dev->cmd, opcode)) {
                 err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
                 MLX5_SET(mbox_out, out, status, status);
                 MLX5_SET(mbox_out, out, syndrome, drv_synd);
@@ -1937,6 +1979,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
                 goto err_free_page;
         }
 
+        cmd->state = MLX5_CMDIF_STATE_DOWN;
         cmd->checksum_disabled = 1;
         cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
         cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;
@@ -1974,6 +2017,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
         mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));
 
         cmd->mode = CMD_MODE_POLLING;
+        cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;
 
         create_msg_cache(dev);
 
@@ -2013,3 +2057,10 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
         dma_pool_destroy(cmd->pool);
 }
 EXPORT_SYMBOL(mlx5_cmd_cleanup);
+
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+                        enum mlx5_cmdif_state cmdif_state)
+{
+        dev->cmd.state = cmdif_state;
+}
+EXPORT_SYMBOL(mlx5_cmd_set_state);
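
The wait_func()/cmd_work_handler() hunks above close a race on the
out-of-queue timeout path: the waiter may reclaim a command entry only if its
work item never started. A condensed sketch of the handshake with plain
workqueue/completion primitives (kernel context assumed; ent_toy, handler_toy
and wait_toy are illustrative names, not driver symbols):

    #include <linux/completion.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/workqueue.h>

    struct ent_toy {
            struct completion handling; /* handler has claimed the entry */
            struct completion done;     /* command fully processed */
            struct work_struct work;    /* INIT_WORK(&work, handler_toy) */
            int ret;
    };

    static void handler_toy(struct work_struct *work)
    {
            struct ent_toy *ent = container_of(work, struct ent_toy, work);

            complete(&ent->handling); /* first action: claim ownership */
            /* ... deliver the command to firmware, then ... */
            complete(&ent->done);
    }

    static int wait_toy(struct ent_toy *ent, unsigned long timeout)
    {
            /* Reclaim only if 'handling' was never signalled AND the
             * pending work really was cancelled; once the handler runs,
             * it owns the entry and will signal 'done' eventually. */
            if (!wait_for_completion_timeout(&ent->handling, timeout) &&
                cancel_work_sync(&ent->work))
                    return -ECANCELED;

            wait_for_completion(&ent->done);
            return ent->ret;
    }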

drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -1121,7 +1121,7 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
 
 int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv);
 int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc);
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv);
 int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);
 void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs);

drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c

@@ -699,6 +699,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                        struct netlink_ext_ack *extack)
 {
         struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv);
+        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
         struct flow_dissector_key_ct *mask, *key;
         bool trk, est, untrk, unest, new;
         u32 ctstate = 0, ctstate_mask = 0;
@@ -706,7 +707,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
         u16 ct_state, ct_state_mask;
         struct flow_match_ct match;
 
-        if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
                 return 0;
 
         if (!ct_priv) {
@@ -715,7 +716,7 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                 return -EOPNOTSUPP;
         }
 
-        flow_rule_match_ct(f->rule, &match);
+        flow_rule_match_ct(rule, &match);
         key = match.key;
         mask = match.mask;
 

drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h

@@ -130,7 +130,9 @@ mlx5_tc_ct_parse_match(struct mlx5e_priv *priv,
                        struct flow_cls_offload *f,
                        struct netlink_ext_ack *extack)
 {
-        if (!flow_rule_match_key(f->rule, FLOW_DISSECTOR_KEY_CT))
+        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+
+        if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CT))
                 return 0;
 
         NL_SET_ERR_MSG_MOD(extack, "mlx5 tc ct offload isn't enabled.");

drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls.c

@@ -69,8 +69,8 @@ static void mlx5e_ktls_del(struct net_device *netdev,
         struct mlx5e_priv *priv = netdev_priv(netdev);
         struct mlx5e_ktls_offload_context_tx *tx_priv =
                 mlx5e_get_ktls_tx_priv_ctx(tls_ctx);
 
-        mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
         mlx5e_destroy_tis(priv->mdev, tx_priv->tisn);
+        mlx5_ktls_destroy_key(priv->mdev, tx_priv->key_id);
         kvfree(tx_priv);
 }

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -2717,7 +2717,8 @@ void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen)
                 mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen);
         }
 
-        if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+        /* Verify inner tirs resources allocated */
+        if (!priv->inner_indir_tir[0].tirn)
                 return;
 
         for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
@@ -3408,14 +3409,15 @@ int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs)
         return err;
 }
 
-void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc)
+void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
 {
         int i;
 
         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
                 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
 
-        if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev))
+        /* Verify inner tirs resources allocated */
+        if (!priv->inner_indir_tir[0].tirn)
                 return;
 
         for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
@@ -5123,7 +5125,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
 err_destroy_direct_tirs:
         mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 err_destroy_indirect_tirs:
-        mlx5e_destroy_indirect_tirs(priv, true);
+        mlx5e_destroy_indirect_tirs(priv);
 err_destroy_direct_rqts:
         mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 err_destroy_indirect_rqts:
@@ -5142,7 +5144,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
         mlx5e_destroy_direct_tirs(priv, priv->xsk_tir);
         mlx5e_destroy_direct_rqts(priv, priv->xsk_tir);
         mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
-        mlx5e_destroy_indirect_tirs(priv, true);
+        mlx5e_destroy_indirect_tirs(priv);
         mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
         mlx5e_close_drop_rq(&priv->drop_rq);

drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -1484,13 +1484,9 @@ bool mlx5e_eswitch_uplink_rep(struct net_device *netdev)
         return netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep;
 }
 
-bool mlx5e_eswitch_rep(struct net_device *netdev)
+bool mlx5e_eswitch_vf_rep(struct net_device *netdev)
 {
-        if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
-            netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
-                return true;
-
-        return false;
+        return netdev->netdev_ops == &mlx5e_netdev_ops_rep;
 }
 
 static void mlx5e_build_rep_params(struct net_device *netdev)
@@ -1747,7 +1743,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
 err_destroy_direct_tirs:
         mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 err_destroy_indirect_tirs:
-        mlx5e_destroy_indirect_tirs(priv, false);
+        mlx5e_destroy_indirect_tirs(priv);
 err_destroy_direct_rqts:
         mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 err_destroy_indirect_rqts:
@@ -1765,7 +1761,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
         mlx5e_destroy_rep_root_ft(priv);
         mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
         mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
-        mlx5e_destroy_indirect_tirs(priv, false);
+        mlx5e_destroy_indirect_tirs(priv);
         mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
         mlx5e_close_drop_rq(&priv->drop_rq);

drivers/net/ethernet/mellanox/mlx5/core/en_rep.h

@@ -210,8 +210,13 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
 
 void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv);
 
-bool mlx5e_eswitch_rep(struct net_device *netdev);
+bool mlx5e_eswitch_vf_rep(struct net_device *netdev);
 bool mlx5e_eswitch_uplink_rep(struct net_device *netdev);
+static inline bool mlx5e_eswitch_rep(struct net_device *netdev)
+{
+        return mlx5e_eswitch_vf_rep(netdev) ||
+               mlx5e_eswitch_uplink_rep(netdev);
+}
 
 #else /* CONFIG_MLX5_ESWITCH */
 static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; }

drivers/net/ethernet/mellanox/mlx5/core/en_tc.c

@@ -3073,6 +3073,11 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
         return true;
 }
 
+static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
+{
+        return priv->mdev == peer_priv->mdev;
+}
+
 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
 {
         struct mlx5_core_dev *fmdev, *pmdev;
@@ -3291,7 +3296,7 @@ static inline int hash_encap_info(struct encap_key *key)
 }
 
-static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
+static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
                                   struct net_device *peer_netdev)
 {
         struct mlx5e_priv *peer_priv;
 
@@ -3299,13 +3304,11 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
         peer_priv = netdev_priv(peer_netdev);
 
         return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
-                mlx5e_eswitch_rep(priv->netdev) &&
-                mlx5e_eswitch_rep(peer_netdev) &&
+                mlx5e_eswitch_vf_rep(priv->netdev) &&
+                mlx5e_eswitch_vf_rep(peer_netdev) &&
                 same_hw_devs(priv, peer_priv));
 }
 
-
-
 bool mlx5e_encap_take(struct mlx5e_encap_entry *e)
 {
         return refcount_inc_not_zero(&e->refcnt);
@@ -3575,14 +3578,37 @@ static int add_vlan_pop_action(struct mlx5e_priv *priv,
         return err;
 }
 
+static bool same_hw_reps(struct mlx5e_priv *priv,
+                         struct net_device *peer_netdev)
+{
+        struct mlx5e_priv *peer_priv;
+
+        peer_priv = netdev_priv(peer_netdev);
+
+        return mlx5e_eswitch_rep(priv->netdev) &&
+               mlx5e_eswitch_rep(peer_netdev) &&
+               same_hw_devs(priv, peer_priv);
+}
+
+static bool is_lag_dev(struct mlx5e_priv *priv,
+                       struct net_device *peer_netdev)
+{
+        return ((mlx5_lag_is_sriov(priv->mdev) ||
+                 mlx5_lag_is_multipath(priv->mdev)) &&
+                 same_hw_reps(priv, peer_netdev));
+}
+
 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
                                     struct net_device *out_dev)
 {
-        if (is_merged_eswitch_dev(priv, out_dev))
+        if (is_merged_eswitch_vfs(priv, out_dev))
                 return true;
 
+        if (is_lag_dev(priv, out_dev))
+                return true;
+
         return mlx5e_eswitch_rep(out_dev) &&
-               same_hw_devs(priv, netdev_priv(out_dev));
+               same_port_devs(priv, netdev_priv(out_dev));
 }
 
 static bool is_duplicated_output_device(struct net_device *dev,

drivers/net/ethernet/mellanox/mlx5/core/en_tx.c

@@ -537,10 +537,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
 void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
 {
         struct mlx5e_tx_wqe_info *wi;
+        u32 dma_fifo_cc, nbytes = 0;
+        u16 ci, sqcc, npkts = 0;
         struct sk_buff *skb;
-        u32 dma_fifo_cc;
-        u16 sqcc;
-        u16 ci;
         int i;
 
         sqcc = sq->cc;
@@ -565,11 +564,15 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq)
                 }
 
                 dev_kfree_skb_any(skb);
+                npkts++;
+                nbytes += wi->num_bytes;
                 sqcc += wi->num_wqebbs;
         }
 
         sq->dma_fifo_cc = dma_fifo_cc;
         sq->cc = sqcc;
+
+        netdev_tx_completed_queue(sq->txq, npkts, nbytes);
 }
 
 #ifdef CONFIG_MLX5_CORE_IPOIB
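
The mlx5e_free_txqsq_descs() change above matters because of the byte-queue
limits (BQL) contract behind the netdev_tx_*_queue() helpers: every byte
reported at transmit time must eventually be reported as completed (or the
queue reset), including descriptors freed while a queue is being closed,
otherwise the queue can stall when it is reopened. A generic sketch of the
two halves of that contract (the toy_* helpers are illustrative, not mlx5
code):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    /* transmit path: account bytes handed to the hardware queue */
    static void toy_tx_account(struct netdev_queue *txq, struct sk_buff *skb)
    {
            netdev_tx_sent_queue(txq, skb->len);
    }

    /* completion path: report everything reclaimed, whether it completed
     * normally or was freed during queue closure */
    static void toy_tx_complete(struct netdev_queue *txq,
                                unsigned int pkts, unsigned int bytes)
    {
            netdev_tx_completed_queue(txq, pkts, bytes);
    }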

drivers/net/ethernet/mellanox/mlx5/core/eq.c

@@ -611,11 +611,13 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
                 .nent = MLX5_NUM_CMD_EQE,
                 .mask[0] = 1ull << MLX5_EVENT_TYPE_CMD,
         };
+        mlx5_cmd_allowed_opcode(dev, MLX5_CMD_OP_CREATE_EQ);
         err = setup_async_eq(dev, &table->cmd_eq, &param, "cmd");
         if (err)
                 goto err1;
 
         mlx5_cmd_use_events(dev);
+        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
 
         param = (struct mlx5_eq_param) {
                 .irq_index = 0,
@@ -645,6 +647,7 @@ static int create_async_eqs(struct mlx5_core_dev *dev)
         mlx5_cmd_use_polling(dev);
         cleanup_async_eq(dev, &table->cmd_eq, "cmd");
 err1:
+        mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL);
         mlx5_eq_notifier_unregister(dev, &table->cq_err_nb);
         return err;
 }

drivers/net/ethernet/mellanox/mlx5/core/events.c

@@ -346,8 +346,10 @@ int mlx5_events_init(struct mlx5_core_dev *dev)
         events->dev = dev;
         dev->priv.events = events;
         events->wq = create_singlethread_workqueue("mlx5_events");
-        if (!events->wq)
+        if (!events->wq) {
+                kfree(events);
                 return -ENOMEM;
+        }
         INIT_WORK(&events->pcie_core_work, mlx5_pcie_event);
 
         return 0;

drivers/net/ethernet/mellanox/mlx5/core/fs_core.c

@@ -344,17 +344,12 @@ static void tree_put_node(struct fs_node *node, bool locked)
                 if (node->del_hw_func)
                         node->del_hw_func(node);
                 if (parent_node) {
-                        /* Only root namespace doesn't have parent and we just
-                         * need to free its node.
-                         */
                         down_write_ref_node(parent_node, locked);
                         list_del_init(&node->list);
-                        if (node->del_sw_func)
-                                node->del_sw_func(node);
-                        up_write_ref_node(parent_node, locked);
-                } else {
-                        kfree(node);
                 }
+                node->del_sw_func(node);
+                if (parent_node)
+                        up_write_ref_node(parent_node, locked);
                 node = NULL;
         }
         if (!node && parent_node)
@@ -468,8 +463,10 @@ static void del_sw_flow_table(struct fs_node *node)
 
         fs_get_obj(ft, node);
 
         rhltable_destroy(&ft->fgs_hash);
-        fs_get_obj(prio, ft->node.parent);
-        prio->num_ft--;
+        if (ft->node.parent) {
+                fs_get_obj(prio, ft->node.parent);
+                prio->num_ft--;
+        }
         kfree(ft);
 }
@@ -2351,6 +2348,17 @@ static int init_root_tree(struct mlx5_flow_steering *steering,
         return 0;
 }
 
+static void del_sw_root_ns(struct fs_node *node)
+{
+        struct mlx5_flow_root_namespace *root_ns;
+        struct mlx5_flow_namespace *ns;
+
+        fs_get_obj(ns, node);
+        root_ns = container_of(ns, struct mlx5_flow_root_namespace, ns);
+        mutex_destroy(&root_ns->chain_lock);
+        kfree(node);
+}
+
 static struct mlx5_flow_root_namespace
 *create_root_ns(struct mlx5_flow_steering *steering,
                 enum fs_flow_table_type table_type)
@@ -2377,7 +2385,7 @@ static struct mlx5_flow_root_namespace
         ns = &root_ns->ns;
         fs_init_namespace(ns);
         mutex_init(&root_ns->chain_lock);
-        tree_init_node(&ns->node, NULL, NULL);
+        tree_init_node(&ns->node, NULL, del_sw_root_ns);
         tree_add_node(&ns->node, NULL);
 
         return root_ns;
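
After the tree_put_node() rework above, node->del_sw_func is called
unconditionally, so every node must carry a software destructor;
del_sw_root_ns supplies one for the root namespace, the only node without a
parent, which previously relied on the special-cased kfree(). A standalone
toy illustration of the invariant (invented types, not the driver's):

    #include <stdlib.h>

    struct node_toy {
            struct node_toy *parent;                /* NULL for the root */
            void (*del_sw)(struct node_toy *node);  /* never NULL */
    };

    static void del_sw_plain_toy(struct node_toy *node)
    {
            free(node);
    }

    static void put_node_toy(struct node_toy *node)
    {
            /* teardown needs no special case for the root: the callback
             * is always present, so no NULL check and no inline free */
            node->del_sw(node);
    }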

drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c

@@ -396,7 +396,7 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
 err_destroy_direct_tirs:
         mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
 err_destroy_indirect_tirs:
-        mlx5e_destroy_indirect_tirs(priv, true);
+        mlx5e_destroy_indirect_tirs(priv);
 err_destroy_direct_rqts:
         mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
 err_destroy_indirect_rqts:
@@ -412,7 +412,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
 {
         mlx5i_destroy_flow_steering(priv);
         mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
-        mlx5e_destroy_indirect_tirs(priv, true);
+        mlx5e_destroy_indirect_tirs(priv);
         mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
         mlx5e_destroy_rqt(priv, &priv->indir_rqt);
         mlx5e_close_drop_rq(&priv->drop_rq);

drivers/net/ethernet/mellanox/mlx5/core/main.c

@@ -965,6 +965,8 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
                 goto err_cmd_cleanup;
         }
 
+        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);
+
         err = mlx5_core_enable_hca(dev, 0);
         if (err) {
                 mlx5_core_err(dev, "enable hca failed\n");
@@ -1026,6 +1028,7 @@ static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot)
 err_disable_hca:
         mlx5_core_disable_hca(dev, 0);
 err_cmd_cleanup:
+        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
         mlx5_cmd_cleanup(dev);
 
         return err;
@@ -1043,6 +1046,7 @@ static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
         }
         mlx5_reclaim_startup_pages(dev);
         mlx5_core_disable_hca(dev, 0);
+        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
         mlx5_cmd_cleanup(dev);
 
         return 0;
@@ -1191,7 +1195,7 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
 
         err = mlx5_function_setup(dev, boot);
         if (err)
-                goto out;
+                goto err_function;
 
         if (boot) {
                 err = mlx5_init_once(dev);
@@ -1229,6 +1233,7 @@ int mlx5_load_one(struct mlx5_core_dev *dev, bool boot)
         mlx5_cleanup_once(dev);
 function_teardown:
         mlx5_function_teardown(dev, boot);
+err_function:
         dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
         mutex_unlock(&dev->intf_state_mutex);
 

include/linux/mlx5/driver.h

@@ -213,6 +213,12 @@ enum mlx5_port_status {
         MLX5_PORT_DOWN = 2,
 };
 
+enum mlx5_cmdif_state {
+        MLX5_CMDIF_STATE_UNINITIALIZED,
+        MLX5_CMDIF_STATE_UP,
+        MLX5_CMDIF_STATE_DOWN,
+};
+
 struct mlx5_cmd_first {
         __be32 data[4];
 };
@@ -258,6 +264,7 @@ struct mlx5_cmd_stats {
 struct mlx5_cmd {
         struct mlx5_nb nb;
 
+        enum mlx5_cmdif_state state;
         void *cmd_alloc_buf;
         dma_addr_t alloc_dma;
         int alloc_size;
@@ -284,6 +291,7 @@ struct mlx5_cmd {
         struct semaphore sem;
         struct semaphore pages_sem;
         int mode;
+        u16 allowed_opcode;
         struct mlx5_cmd_work_ent *ent_arr[MLX5_MAX_COMMANDS];
         struct dma_pool *pool;
         struct mlx5_cmd_debug dbg;
@@ -743,6 +751,7 @@ struct mlx5_cmd_work_ent {
         struct delayed_work cb_timeout_work;
         void *context;
         int idx;
+        struct completion handling;
         struct completion done;
         struct mlx5_cmd *cmd;
         struct work_struct work;
@@ -874,10 +883,17 @@ mlx5_frag_buf_get_idx_last_contig_stride(struct mlx5_frag_buf_ctrl *fbc, u32 ix)
         return min_t(u32, last_frag_stride_idx - fbc->strides_offset, fbc->sz_m1);
 }
 
+enum {
+        CMD_ALLOWED_OPCODE_ALL,
+};
+
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
+void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
+                        enum mlx5_cmdif_state cmdif_state);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_polling(struct mlx5_core_dev *dev);
+void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode);
 
 struct mlx5_async_ctx {
         struct mlx5_core_dev *dev;