Merge remote-tracking branch 'mlx5-next/mlx5-next' into wip/dl-for-next

Merge the tip of mlx5-next to pick up the changes that add XRQ support
to the DEVX interface, which are needed by the following two patches.

Signed-off-by: Doug Ledford <dledford@redhat.com>
Author: Doug Ledford <dledford@redhat.com>
Date:   2019-08-13 12:19:19 -04:00
Commit: 749b9eef50

14 changed files with 314 additions and 269 deletions

drivers/infiniband/hw/mlx5/devx.c

@ -930,6 +930,7 @@ static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
case MLX5_CMD_OP_QUERY_CONG_STATUS:
case MLX5_CMD_OP_QUERY_CONG_PARAMS:
case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
case MLX5_CMD_OP_QUERY_LAG:
return true;
default:
return false;

drivers/infiniband/hw/mlx5/srq_cmd.c

@ -86,7 +86,7 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
xa_lock(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
atomic_inc(&srq->common.refcount);
refcount_inc(&srq->common.refcount);
xa_unlock(&table->array);
return srq;
@ -592,7 +592,7 @@ int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
if (err)
return err;
atomic_set(&srq->common.refcount, 1);
refcount_set(&srq->common.refcount, 1);
init_completion(&srq->common.free);
err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
@ -675,7 +675,7 @@ static int srq_event_notifier(struct notifier_block *nb,
xa_lock(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
atomic_inc(&srq->common.refcount);
refcount_inc(&srq->common.refcount);
xa_unlock(&table->array);
if (!srq)

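The atomic_t to refcount_t switch above follows the usual kernel get/put pattern for table-backed objects: take a reference under the table lock on lookup, and complete a waiter when the last reference is dropped. A minimal sketch of that pattern, using hypothetical demo_* names rather than the driver's actual structures:

    #include <linux/types.h>
    #include <linux/refcount.h>
    #include <linux/xarray.h>
    #include <linux/completion.h>

    struct demo_obj {
            u32 id;
            refcount_t refcount;      /* saturates and warns instead of silently wrapping */
            struct completion free;   /* signalled once the last reference is gone */
    };

    /* Look up an object and take a reference while the xarray lock is held. */
    static struct demo_obj *demo_get(struct xarray *xa, u32 id)
    {
            struct demo_obj *obj;

            xa_lock(xa);
            obj = xa_load(xa, id);
            if (obj)
                    refcount_inc(&obj->refcount);
            xa_unlock(xa);

            return obj;
    }

    /* Drop a reference; the destroy path waits on obj->free before freeing. */
    static void demo_put(struct demo_obj *obj)
    {
            if (refcount_dec_and_test(&obj->refcount))
                    complete(&obj->free);
    }

The conversion is mechanical because the lifecycle does not change: refcount_set(..., 1) at creation, refcount_inc() on lookup, refcount_dec_and_test() on put; refcount_t only adds overflow/underflow sanity checking.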
drivers/net/ethernet/mellanox/mlx5/core/cmd.c

@ -446,6 +446,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
case MLX5_CMD_OP_CREATE_UMEM:
case MLX5_CMD_OP_DESTROY_UMEM:
case MLX5_CMD_OP_ALLOC_MEMIC:
case MLX5_CMD_OP_MODIFY_XRQ:
case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
*status = MLX5_DRIVER_STATUS_ABORTED;
*synd = MLX5_DRIVER_SYND;
return -EIO;
@ -637,6 +639,8 @@ const char *mlx5_command_str(int command)
MLX5_COMMAND_STR_CASE(DESTROY_UCTX);
MLX5_COMMAND_STR_CASE(CREATE_UMEM);
MLX5_COMMAND_STR_CASE(DESTROY_UMEM);
MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
MLX5_COMMAND_STR_CASE(MODIFY_XRQ);
default: return "unknown command opcode";
}
}
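mlx5_command_str() is generated by a stringifying case macro (in the driver it expands roughly to "case MLX5_CMD_OP_<name>: return \"<name>\""), which is why supporting a new opcode only needs the one-line additions above. A small self-contained model of that pattern, with demo names and the two new XRQ opcodes:

    #include <stdio.h>

    enum {
            DEMO_CMD_OP_RELEASE_XRQ_ERROR = 0x729,
            DEMO_CMD_OP_MODIFY_XRQ        = 0x72a,
    };

    /* Expands to: case DEMO_CMD_OP_<name>: return "<name>" */
    #define DEMO_COMMAND_STR_CASE(__cmd) case DEMO_CMD_OP_ ## __cmd: return #__cmd

    static const char *demo_command_str(int command)
    {
            switch (command) {
            DEMO_COMMAND_STR_CASE(RELEASE_XRQ_ERROR);
            DEMO_COMMAND_STR_CASE(MODIFY_XRQ);
            default: return "unknown command opcode";
            }
    }

    int main(void)
    {
            printf("%s\n", demo_command_str(0x72a)); /* prints MODIFY_XRQ */
            return 0;
    }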

drivers/net/ethernet/mellanox/mlx5/core/eq.c

@ -215,11 +215,7 @@ static int mlx5_eq_async_int(struct notifier_block *nb,
*/
dma_rmb();
if (likely(eqe->type < MLX5_EVENT_TYPE_MAX))
atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
else
mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type);
atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe);
atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe);
++eq->cons_index;
@ -945,9 +941,6 @@ int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
return -EINVAL;
return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_register);
@ -956,9 +949,6 @@ int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb)
{
struct mlx5_eq_table *eqt = dev->priv.eq_table;
if (nb->event_type >= MLX5_EVENT_TYPE_MAX)
return -EINVAL;
return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb);
}
EXPORT_SYMBOL(mlx5_eq_notifier_unregister);
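For context on the call sites above: every EQE is delivered to a per-event-type atomic notifier chain and then to a catch-all MLX5_EVENT_TYPE_NOTIFY_ANY chain, and the register/unregister helpers attach the notifier_block embedded in a struct mlx5_nb to the chain matching its event type. A hedged kernel-style sketch of that dispatch shape, with demo_* names and sizes that are illustrative only:

    #include <linux/notifier.h>

    #define DEMO_EVENT_TYPE_MAX 0x100
    #define DEMO_EVENT_TYPE_ANY 0xff   /* catch-all slot, analogous to MLX5_EVENT_TYPE_NOTIFY_ANY */

    struct demo_eq_table {
            struct atomic_notifier_head nh[DEMO_EVENT_TYPE_MAX];
    };

    /* Deliver one event: type-specific chain first, then the catch-all chain. */
    static void demo_dispatch(struct demo_eq_table *t, unsigned int type, void *eqe)
    {
            atomic_notifier_call_chain(&t->nh[type], type, eqe);
            atomic_notifier_call_chain(&t->nh[DEMO_EVENT_TYPE_ANY], type, eqe);
    }

    /* Subscribe a handler to one event type. */
    static int demo_register(struct demo_eq_table *t, unsigned int type,
                             struct notifier_block *nb)
    {
            return atomic_notifier_chain_register(&t->nh[type], nb);
    }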

drivers/net/ethernet/mellanox/mlx5/core/eswitch.c

@ -58,20 +58,9 @@ struct vport_addr {
bool mc_promisc;
};
enum {
UC_ADDR_CHANGE = BIT(0),
MC_ADDR_CHANGE = BIT(1),
PROMISC_CHANGE = BIT(3),
};
static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);
/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
MC_ADDR_CHANGE | \
PROMISC_CHANGE)
struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
@ -108,13 +97,13 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);
if (events_mask & UC_ADDR_CHANGE)
if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_uc_address_change, 1);
if (events_mask & MC_ADDR_CHANGE)
if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_mc_address_change, 1);
if (events_mask & PROMISC_CHANGE)
if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
MLX5_SET(nic_vport_context, nic_vport_ctx,
event_on_promisc_change, 1);
@ -463,6 +452,22 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
return err;
}
#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
MLX5_VPORT_MC_ADDR_CHANGE | \
MLX5_VPORT_PROMISC_CHANGE)
static int esw_legacy_enable(struct mlx5_eswitch *esw)
{
int ret;
ret = esw_create_legacy_table(esw);
if (ret)
return ret;
mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
return 0;
}
static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
esw_cleanup_vepa_rules(esw);
@ -470,6 +475,19 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
esw_destroy_legacy_vepa_table(esw);
}
static void esw_legacy_disable(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
mlx5_eswitch_disable_pf_vf_vports(esw);
mc_promisc = &esw->mc_promisc;
if (mc_promisc->uplink_rule)
mlx5_del_flow_rules(mc_promisc->uplink_rule);
esw_destroy_legacy_table(esw);
}
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
struct vport_addr *vaddr);
@ -901,21 +919,21 @@ static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
vport->vport, mac);
if (vport->enabled_events & UC_ADDR_CHANGE) {
if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
}
if (vport->enabled_events & MC_ADDR_CHANGE)
if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
if (vport->enabled_events & PROMISC_CHANGE) {
if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
esw_update_vport_rx_mode(esw, vport);
if (!IS_ERR_OR_NULL(vport->allmulti_rule))
esw_update_vport_mc_promisc(esw, vport);
}
if (vport->enabled_events & (PROMISC_CHANGE | MC_ADDR_CHANGE))
if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);
esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
@ -1393,18 +1411,49 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
return err;
}
static bool element_type_supported(struct mlx5_eswitch *esw, int type)
{
const struct mlx5_core_dev *dev = esw->dev;
switch (type) {
case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
return MLX5_CAP_QOS(dev, esw_element_type) &
ELEMENT_TYPE_CAP_MASK_TASR;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
return MLX5_CAP_QOS(dev, esw_element_type) &
ELEMENT_TYPE_CAP_MASK_VPORT;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
return MLX5_CAP_QOS(dev, esw_element_type) &
ELEMENT_TYPE_CAP_MASK_VPORT_TC;
case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
return MLX5_CAP_QOS(dev, esw_element_type) &
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
}
return false;
}
/* Vport QoS management */
static int esw_create_tsar(struct mlx5_eswitch *esw)
static void esw_create_tsar(struct mlx5_eswitch *esw)
{
u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_core_dev *dev = esw->dev;
__be32 *attr;
int err;
if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
return 0;
return;
if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
return;
if (esw->qos.enabled)
return -EEXIST;
return;
MLX5_SET(scheduling_context, tsar_ctx, element_type,
SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);
attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@ -1412,11 +1461,10 @@ static int esw_create_tsar(struct mlx5_eswitch *esw)
&esw->qos.root_tsar_id);
if (err) {
esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
return err;
return;
}
esw->qos.enabled = true;
return 0;
}
static void esw_destroy_tsar(struct mlx5_eswitch *esw)
@ -1619,7 +1667,7 @@ static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
}
static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
int enable_events)
enum mlx5_eswitch_vport_event enabled_events)
{
u16 vport_num = vport->vport;
@ -1641,7 +1689,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */
vport->enabled_events = enable_events;
vport->enabled_events = enabled_events;
vport->enabled = true;
/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
@ -1770,47 +1818,15 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
* whichever are present on the eswitch.
*/
void
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events)
{
struct mlx5_vport *vport;
int err;
int i, enabled_events;
if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "egress ACL is not supported by FW\n");
esw->mode = mode;
mlx5_lag_update(esw->dev);
if (mode == MLX5_ESWITCH_LEGACY) {
err = esw_create_legacy_table(esw);
if (err)
goto abort;
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
err = esw_offloads_init(esw);
}
if (err)
goto abort;
err = esw_create_tsar(esw);
if (err)
esw_warn(esw->dev, "Failed to create eswitch TSAR");
enabled_events = (mode == MLX5_ESWITCH_LEGACY) ? SRIOV_VPORT_EVENTS :
UC_ADDR_CHANGE;
int i;
/* Enable PF vport */
vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
@ -1825,6 +1841,52 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
/* Enable VF vports */
mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
esw_enable_vport(esw, vport, enabled_events);
}
/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
* whichever are previously enabled on the eswitch.
*/
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
struct mlx5_vport *vport;
int i;
mlx5_esw_for_all_vports_reverse(esw, i, vport)
esw_disable_vport(esw, vport);
}
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
{
int err;
if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "ingress ACL is not supported by FW\n");
if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
esw_warn(esw->dev, "egress ACL is not supported by FW\n");
esw_create_tsar(esw);
esw->mode = mode;
mlx5_lag_update(esw->dev);
if (mode == MLX5_ESWITCH_LEGACY) {
err = esw_legacy_enable(esw);
} else {
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
err = esw_offloads_enable(esw);
}
if (err)
goto abort;
mlx5_eswitch_event_handlers_register(esw);
@ -1847,10 +1909,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int mode)
void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
{
struct esw_mc_addr *mc_promisc;
struct mlx5_vport *vport;
int old_mode;
int i;
if (!ESW_ALLOWED(esw) || esw->mode == MLX5_ESWITCH_NONE)
return;
@ -1859,22 +1918,15 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw)
esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
esw->esw_funcs.num_vfs, esw->enabled_vports);
mc_promisc = &esw->mc_promisc;
mlx5_eswitch_event_handlers_unregister(esw);
mlx5_esw_for_all_vports(esw, i, vport)
esw_disable_vport(esw, vport);
if (mc_promisc && mc_promisc->uplink_rule)
mlx5_del_flow_rules(mc_promisc->uplink_rule);
if (esw->mode == MLX5_ESWITCH_LEGACY)
esw_legacy_disable(esw);
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw_offloads_disable(esw);
esw_destroy_tsar(esw);
if (esw->mode == MLX5_ESWITCH_LEGACY)
esw_destroy_legacy_table(esw);
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw_offloads_cleanup(esw);
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
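With the enable path split as above, each mode picks its own vport event subscription: esw_legacy_enable() passes the UC, MC and promiscuous change bits, while the offloads path (see eswitch_offloads.c) passes only MLX5_VPORT_UC_ADDR_CHANGE. A tiny standalone model of how such a bitmask gates the change handler, mirroring enum mlx5_eswitch_vport_event with demo_* names:

    #include <stdio.h>

    enum demo_vport_event {
            DEMO_UC_ADDR_CHANGE = 1 << 0,
            DEMO_MC_ADDR_CHANGE = 1 << 1,
            DEMO_PROMISC_CHANGE = 1 << 3,
    };

    #define DEMO_LEGACY_EVENTS \
            (DEMO_UC_ADDR_CHANGE | DEMO_MC_ADDR_CHANGE | DEMO_PROMISC_CHANGE)

    static void demo_vport_change(unsigned int enabled_events)
    {
            if (enabled_events & DEMO_UC_ADDR_CHANGE)
                    printf("update UC address list\n");
            if (enabled_events & DEMO_MC_ADDR_CHANGE)
                    printf("update MC address list\n");
            if (enabled_events & DEMO_PROMISC_CHANGE)
                    printf("update promiscuous rx mode\n");
    }

    int main(void)
    {
            demo_vport_change(DEMO_LEGACY_EVENTS);    /* legacy: all three */
            demo_vport_change(DEMO_UC_ADDR_CHANGE);   /* offloads: UC only */
            return 0;
    }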

drivers/net/ethernet/mellanox/mlx5/core/eswitch.h

@ -101,6 +101,13 @@ struct mlx5_vport_info {
bool trusted;
};
/* Vport context events */
enum mlx5_eswitch_vport_event {
MLX5_VPORT_UC_ADDR_CHANGE = BIT(0),
MLX5_VPORT_MC_ADDR_CHANGE = BIT(1),
MLX5_VPORT_PROMISC_CHANGE = BIT(3),
};
struct mlx5_vport {
struct mlx5_core_dev *dev;
int vport;
@ -122,7 +129,7 @@ struct mlx5_vport {
} qos;
bool enabled;
u16 enabled_events;
enum mlx5_eswitch_vport_event enabled_events;
};
enum offloads_fdb_flags {
@ -207,8 +214,11 @@ enum {
struct mlx5_eswitch {
struct mlx5_core_dev *dev;
struct mlx5_nb nb;
/* legacy data structures */
struct mlx5_eswitch_fdb fdb_table;
struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
struct esw_mc_addr mc_promisc;
/* end of legacy */
struct workqueue_struct *work_queue;
struct mlx5_vport *vports;
u32 flags;
@ -218,7 +228,6 @@ struct mlx5_eswitch {
* and async SRIOV admin state changes
*/
struct mutex state_lock;
struct esw_mc_addr mc_promisc;
struct {
bool enabled;
@ -233,8 +242,8 @@ struct mlx5_eswitch {
struct mlx5_esw_functions esw_funcs;
};
void esw_offloads_cleanup(struct mlx5_eswitch *esw);
int esw_offloads_init(struct mlx5_eswitch *esw);
void esw_offloads_disable(struct mlx5_eswitch *esw);
int esw_offloads_enable(struct mlx5_eswitch *esw);
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
int esw_offloads_init_reps(struct mlx5_eswitch *esw);
void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
@ -513,6 +522,11 @@ void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw);
(vport) = &(esw)->vports[i], \
(i) < (esw)->total_vports; (i)++)
#define mlx5_esw_for_all_vports_reverse(esw, i, vport) \
for ((i) = (esw)->total_vports - 1; \
(vport) = &(esw)->vports[i], \
(i) >= MLX5_VPORT_PF; (i)--)
#define mlx5_esw_for_each_vf_vport(esw, i, vport, nvfs) \
for ((i) = MLX5_VPORT_FIRST_VF; \
(vport) = &(esw)->vports[(i)], \
@ -574,6 +588,11 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
void
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
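The new mlx5_esw_for_all_vports_reverse() pairs with the existing forward macro so teardown can walk the vport array from the last entry back to the PF. A standalone model of the comma-operator iteration trick those macros use (demo_* names; the reverse form below checks the bound before the assignment to stay strictly valid C):

    #include <stdio.h>

    struct demo_vport { int num; };

    /* vport already points at arr[i] when the loop body runs. */
    #define demo_for_all_vports(arr, total, i, vport) \
            for ((i) = 0; (vport) = &(arr)[(i)], (i) < (total); (i)++)

    #define demo_for_all_vports_reverse(arr, total, i, vport) \
            for ((i) = (total) - 1; (i) >= 0 && ((vport) = &(arr)[(i)], 1); (i)--)

    int main(void)
    {
            struct demo_vport vports[3] = { { 0 }, { 1 }, { 2 } };
            struct demo_vport *vport;
            int i;

            demo_for_all_vports(vports, 3, i, vport)
                    printf("enable vport %d\n", vport->num);
            demo_for_all_vports_reverse(vports, 3, i, vport)
                    printf("disable vport %d\n", vport->num);
            return 0;
    }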

drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c

@ -587,13 +587,16 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
mlx5_del_flow_rules(rule);
}
static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
u8 fdb_to_vport_reg_c_id;
int err;
if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
return 0;
err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
out, sizeof(out));
if (err)
@ -602,33 +605,10 @@ static int mlx5_eswitch_enable_passing_vport_metadata(struct mlx5_eswitch *esw)
fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
esw_vport_context.fdb_to_vport_reg_c_id);
fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
MLX5_SET(modify_esw_vport_context_in, in,
field_select.fdb_to_vport_reg_c_id, 1);
return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
in, sizeof(in));
}
static int mlx5_eswitch_disable_passing_vport_metadata(struct mlx5_eswitch *esw)
{
u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
u8 fdb_to_vport_reg_c_id;
int err;
err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
out, sizeof(out));
if (err)
return err;
fdb_to_vport_reg_c_id = MLX5_GET(query_esw_vport_context_out, out,
esw_vport_context.fdb_to_vport_reg_c_id);
fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
if (enable)
fdb_to_vport_reg_c_id |= MLX5_FDB_TO_VPORT_REG_C_0;
else
fdb_to_vport_reg_c_id &= ~MLX5_FDB_TO_VPORT_REG_C_0;
MLX5_SET(modify_esw_vport_context_in, in,
esw_vport_context.fdb_to_vport_reg_c_id, fdb_to_vport_reg_c_id);
@ -2124,7 +2104,7 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type
return NOTIFY_OK;
}
int esw_offloads_init(struct mlx5_eswitch *esw)
int esw_offloads_enable(struct mlx5_eswitch *esw)
{
int err;
@ -2138,11 +2118,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
if (err)
return err;
if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
err = mlx5_eswitch_enable_passing_vport_metadata(esw);
if (err)
goto err_vport_metadata;
}
err = esw_set_passing_vport_metadata(esw, true);
if (err)
goto err_vport_metadata;
mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
err = esw_offloads_load_all_reps(esw);
if (err)
@ -2156,8 +2136,8 @@ int esw_offloads_init(struct mlx5_eswitch *esw)
return 0;
err_reps:
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
mlx5_eswitch_disable_passing_vport_metadata(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
esw_set_passing_vport_metadata(esw, false);
err_vport_metadata:
esw_offloads_steering_cleanup(esw);
return err;
@ -2182,13 +2162,13 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
return err;
}
void esw_offloads_cleanup(struct mlx5_eswitch *esw)
void esw_offloads_disable(struct mlx5_eswitch *esw)
{
mlx5_rdma_disable_roce(esw->dev);
esw_offloads_devcom_cleanup(esw);
esw_offloads_unload_all_reps(esw);
if (mlx5_eswitch_vport_match_metadata_enabled(esw))
mlx5_eswitch_disable_passing_vport_metadata(esw);
mlx5_eswitch_disable_pf_vf_vports(esw);
esw_set_passing_vport_metadata(esw, false);
esw_offloads_steering_cleanup(esw);
esw->offloads.encap = DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
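esw_set_passing_vport_metadata() folds the old enable/disable pair into one helper because the only difference was whether MLX5_FDB_TO_VPORT_REG_C_0 is set or cleared in the queried fdb_to_vport_reg_c_id before it is written back; moving the match-metadata capability check inside also lets esw_offloads_enable()/disable() call it unconditionally. A minimal model of that read-modify-write, with a hypothetical bit value chosen for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_FDB_TO_VPORT_REG_C_0 (1 << 0)   /* illustrative bit position only */

    static uint8_t demo_set_passing_metadata(uint8_t fdb_to_vport_reg_c_id, int enable)
    {
            if (enable)
                    fdb_to_vport_reg_c_id |= DEMO_FDB_TO_VPORT_REG_C_0;
            else
                    fdb_to_vport_reg_c_id &= ~DEMO_FDB_TO_VPORT_REG_C_0;

            return fdb_to_vport_reg_c_id;
    }

    int main(void)
    {
            printf("%#x\n", demo_set_passing_metadata(0x00, 1));  /* 0x1 */
            printf("%#x\n", demo_set_passing_metadata(0x01, 0));  /* 0   */
            return 0;
    }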

drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c

@ -566,7 +566,9 @@ static int mlx5_cmd_delete_fte(struct mlx5_flow_root_namespace *ns,
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
u32 *id)
{
u32 in[MLX5_ST_SZ_DW(alloc_flow_counter_in)] = {0};
u32 out[MLX5_ST_SZ_DW(alloc_flow_counter_out)] = {0};
@ -574,6 +576,7 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
MLX5_SET(alloc_flow_counter_in, in, opcode,
MLX5_CMD_OP_ALLOC_FLOW_COUNTER);
MLX5_SET(alloc_flow_counter_in, in, flow_counter_bulk, alloc_bitmask);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
if (!err)
@ -581,6 +584,11 @@ int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
return err;
}
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id)
{
return mlx5_cmd_fc_bulk_alloc(dev, 0, id);
}
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id)
{
u32 in[MLX5_ST_SZ_DW(dealloc_flow_counter_in)] = {0};
@ -615,67 +623,24 @@ int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
return 0;
}
struct mlx5_cmd_fc_bulk {
u32 id;
int num;
int outlen;
u32 out[0];
};
struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num)
int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len)
{
struct mlx5_cmd_fc_bulk *b;
int outlen =
MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter) * num;
b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
if (!b)
return NULL;
b->id = id;
b->num = num;
b->outlen = outlen;
return b;
return MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter) * bulk_len;
}
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b)
{
kfree(b);
}
int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b)
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out)
{
int outlen = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
u32 in[MLX5_ST_SZ_DW(query_flow_counter_in)] = {0};
MLX5_SET(query_flow_counter_in, in, opcode,
MLX5_CMD_OP_QUERY_FLOW_COUNTER);
MLX5_SET(query_flow_counter_in, in, op_mod, 0);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, b->id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, b->num);
return mlx5_cmd_exec(dev, in, sizeof(in), b->out, b->outlen);
}
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
struct mlx5_cmd_fc_bulk *b, u32 id,
u64 *packets, u64 *bytes)
{
int index = id - b->id;
void *stats;
if (index < 0 || index >= b->num) {
mlx5_core_warn(dev, "Flow counter id (0x%x) out of range (0x%x..0x%x). Counter ignored.\n",
id, b->id, b->id + b->num - 1);
return;
}
stats = MLX5_ADDR_OF(query_flow_counter_out, b->out,
flow_statistics[index]);
*packets = MLX5_GET64(traffic_counter, stats, packets);
*bytes = MLX5_GET64(traffic_counter, stats, octets);
MLX5_SET(query_flow_counter_in, in, flow_counter_id, base_id);
MLX5_SET(query_flow_counter_in, in, num_of_counters, bulk_len);
return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
}
int mlx5_packet_reformat_alloc(struct mlx5_core_dev *dev,

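The reworked bulk query writes into a caller-provided buffer: mlx5_cmd_fc_get_bulk_query_out_len() sizes it as one query_flow_counter_out header plus bulk_len traffic_counter entries, and mlx5_cmd_fc_alloc() is now just mlx5_cmd_fc_bulk_alloc() with a zero bulk bitmask, preserving the old single-counter behaviour. A rough worked model of the length arithmetic; the 16-byte sizes below are assumptions for illustration, not values taken from mlx5_ifc.h:

    #include <stdio.h>

    /* Assumed sizes, for illustration only. */
    #define DEMO_QUERY_FLOW_COUNTER_OUT_HDR 16  /* status/syndrome header */
    #define DEMO_TRAFFIC_COUNTER_SZ         16  /* packets + octets, two u64s */

    static int demo_bulk_query_out_len(int bulk_len)
    {
            return DEMO_QUERY_FLOW_COUNTER_OUT_HDR + DEMO_TRAFFIC_COUNTER_SZ * bulk_len;
    }

    int main(void)
    {
            /* A bulk of 4 counters would need 16 + 16 * 4 = 80 bytes of output. */
            printf("%d\n", demo_bulk_query_out_len(4));
            return 0;
    }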
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h

@ -78,20 +78,16 @@ struct mlx5_flow_cmds {
};
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
int mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev,
enum mlx5_fc_bulk_alloc_bitmask alloc_bitmask,
u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
int mlx5_cmd_fc_query(struct mlx5_core_dev *dev, u32 id,
u64 *packets, u64 *bytes);
struct mlx5_cmd_fc_bulk;
struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u32 id, int num);
void mlx5_cmd_fc_bulk_free(struct mlx5_cmd_fc_bulk *b);
int
mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, struct mlx5_cmd_fc_bulk *b);
void mlx5_cmd_fc_bulk_get(struct mlx5_core_dev *dev,
struct mlx5_cmd_fc_bulk *b, u32 id,
u64 *packets, u64 *bytes);
int mlx5_cmd_fc_get_bulk_query_out_len(int bulk_len);
int mlx5_cmd_fc_bulk_query(struct mlx5_core_dev *dev, u32 base_id, int bulk_len,
u32 *out);
const struct mlx5_flow_cmds *mlx5_fs_cmd_get_default(enum fs_flow_table_type type);
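The header now exposes a length helper plus a query that fills a caller-owned buffer, instead of the old opaque mlx5_cmd_fc_bulk object. A hedged sketch of the intended calling convention; demo_read_bulk() is a hypothetical caller (fs_counters.c below allocates its buffer once at init rather than per query):

    #include <linux/slab.h>
    /* plus the mlx5_cmd_fc_* prototypes declared above */

    static int demo_read_bulk(struct mlx5_core_dev *dev, u32 base_id, int bulk_len)
    {
            int out_len = mlx5_cmd_fc_get_bulk_query_out_len(bulk_len);
            u32 *out;
            int err;

            /* base_id is kept 4-aligned and bulk_len a multiple of 4, as in fs_counters.c */
            out = kzalloc(out_len, GFP_KERNEL);
            if (!out)
                    return -ENOMEM;

            err = mlx5_cmd_fc_bulk_query(dev, base_id, bulk_len, out);
            if (!err) {
                    /* parse the traffic_counter entries out of "out" here */
            }

            kfree(out);
            return err;
    }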

drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c

@ -75,7 +75,7 @@ struct mlx5_fc {
* access to counter list:
* - create (user context)
* - mlx5_fc_create() only adds to an addlist to be used by
* mlx5_fc_stats_query_work(). addlist is a lockless single linked list
* mlx5_fc_stats_work(). addlist is a lockless single linked list
* that doesn't require any additional synchronization when adding single
* node.
* - spawn thread to do the actual destroy
@ -136,72 +136,69 @@ static void mlx5_fc_stats_remove(struct mlx5_core_dev *dev,
spin_unlock(&fc_stats->counters_idr_lock);
}
/* The function returns the last counter that was queried so the caller
* function can continue calling it till all counters are queried.
*/
static struct mlx5_fc *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
struct mlx5_fc *first,
u32 last_id)
static int get_max_bulk_query_len(struct mlx5_core_dev *dev)
{
return min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
(1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
}
static void update_counter_cache(int index, u32 *bulk_raw_data,
struct mlx5_fc_cache *cache)
{
void *stats = MLX5_ADDR_OF(query_flow_counter_out, bulk_raw_data,
flow_statistics[index]);
u64 packets = MLX5_GET64(traffic_counter, stats, packets);
u64 bytes = MLX5_GET64(traffic_counter, stats, octets);
if (cache->packets == packets)
return;
cache->packets = packets;
cache->bytes = bytes;
cache->lastuse = jiffies;
}
static void mlx5_fc_stats_query_counter_range(struct mlx5_core_dev *dev,
struct mlx5_fc *first,
u32 last_id)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
struct mlx5_fc *counter = NULL;
struct mlx5_cmd_fc_bulk *b;
bool more = false;
u32 afirst_id;
int num;
bool query_more_counters = (first->id <= last_id);
int max_bulk_len = get_max_bulk_query_len(dev);
u32 *data = fc_stats->bulk_query_out;
struct mlx5_fc *counter = first;
u32 bulk_base_id;
int bulk_len;
int err;
int max_bulk = min_t(int, MLX5_SW_MAX_COUNTERS_BULK,
(1 << MLX5_CAP_GEN(dev, log_max_flow_counter_bulk)));
while (query_more_counters) {
/* first id must be aligned to 4 when using bulk query */
bulk_base_id = counter->id & ~0x3;
/* first id must be aligned to 4 when using bulk query */
afirst_id = first->id & ~0x3;
/* number of counters to query inc. the last counter */
bulk_len = min_t(int, max_bulk_len,
ALIGN(last_id - bulk_base_id + 1, 4));
/* number of counters to query inc. the last counter */
num = ALIGN(last_id - afirst_id + 1, 4);
if (num > max_bulk) {
num = max_bulk;
last_id = afirst_id + num - 1;
}
b = mlx5_cmd_fc_bulk_alloc(dev, afirst_id, num);
if (!b) {
mlx5_core_err(dev, "Error allocating resources for bulk query\n");
return NULL;
}
err = mlx5_cmd_fc_bulk_query(dev, b);
if (err) {
mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
goto out;
}
counter = first;
list_for_each_entry_from(counter, &fc_stats->counters, list) {
struct mlx5_fc_cache *c = &counter->cache;
u64 packets;
u64 bytes;
if (counter->id > last_id) {
more = true;
break;
err = mlx5_cmd_fc_bulk_query(dev, bulk_base_id, bulk_len,
data);
if (err) {
mlx5_core_err(dev, "Error doing bulk query: %d\n", err);
return;
}
query_more_counters = false;
mlx5_cmd_fc_bulk_get(dev, b,
counter->id, &packets, &bytes);
list_for_each_entry_from(counter, &fc_stats->counters, list) {
int counter_index = counter->id - bulk_base_id;
struct mlx5_fc_cache *cache = &counter->cache;
if (c->packets == packets)
continue;
if (counter->id >= bulk_base_id + bulk_len) {
query_more_counters = true;
break;
}
c->packets = packets;
c->bytes = bytes;
c->lastuse = jiffies;
update_counter_cache(counter_index, data, cache);
}
}
out:
mlx5_cmd_fc_bulk_free(b);
return more ? counter : NULL;
}
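The loop above rounds the current counter id down to a multiple of 4 (the bulk-query base must be 4-aligned), rounds the remaining span up to a multiple of 4 so the last id is covered, and caps it at get_max_bulk_query_len(), i.e. min(MLX5_SW_MAX_COUNTERS_BULK, 1 << log_max_flow_counter_bulk). A standalone model of that range arithmetic with hypothetical inputs:

    #include <stdio.h>

    /* Round x up to a power-of-two multiple a, like the kernel's ALIGN(). */
    #define DEMO_ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    static void demo_bulk_range(unsigned int id, unsigned int last_id, int max_bulk_len)
    {
            unsigned int bulk_base_id = id & ~0x3u;                   /* round down to multiple of 4 */
            int bulk_len = DEMO_ALIGN(last_id - bulk_base_id + 1, 4); /* cover last_id, stay 4-aligned */

            if (bulk_len > max_bulk_len)
                    bulk_len = max_bulk_len;

            printf("base %u, len %d\n", bulk_base_id, bulk_len);
    }

    int main(void)
    {
            /* Counters 6..21 are fetched as ids 4..23: base 4, len 20. */
            demo_bulk_range(6, 21, 128);
            return 0;
    }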
static void mlx5_free_fc(struct mlx5_core_dev *dev,
@ -244,8 +241,8 @@ static void mlx5_fc_stats_work(struct work_struct *work)
counter = list_first_entry(&fc_stats->counters, struct mlx5_fc,
list);
while (counter)
counter = mlx5_fc_stats_query(dev, counter, last->id);
if (counter)
mlx5_fc_stats_query_counter_range(dev, counter, last->id);
fc_stats->next_query = now + fc_stats->sampling_interval;
}
@ -324,6 +321,8 @@ EXPORT_SYMBOL(mlx5_fc_destroy);
int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
{
struct mlx5_fc_stats *fc_stats = &dev->priv.fc_stats;
int max_bulk_len;
int max_out_len;
spin_lock_init(&fc_stats->counters_idr_lock);
idr_init(&fc_stats->counters_idr);
@ -331,14 +330,24 @@ int mlx5_init_fc_stats(struct mlx5_core_dev *dev)
init_llist_head(&fc_stats->addlist);
init_llist_head(&fc_stats->dellist);
max_bulk_len = get_max_bulk_query_len(dev);
max_out_len = mlx5_cmd_fc_get_bulk_query_out_len(max_bulk_len);
fc_stats->bulk_query_out = kzalloc(max_out_len, GFP_KERNEL);
if (!fc_stats->bulk_query_out)
return -ENOMEM;
fc_stats->wq = create_singlethread_workqueue("mlx5_fc");
if (!fc_stats->wq)
return -ENOMEM;
goto err_wq_create;
fc_stats->sampling_interval = MLX5_FC_STATS_PERIOD;
INIT_DELAYED_WORK(&fc_stats->work, mlx5_fc_stats_work);
return 0;
err_wq_create:
kfree(fc_stats->bulk_query_out);
return -ENOMEM;
}
void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
@ -352,6 +361,8 @@ void mlx5_cleanup_fc_stats(struct mlx5_core_dev *dev)
destroy_workqueue(dev->priv.fc_stats.wq);
dev->priv.fc_stats.wq = NULL;
kfree(fc_stats->bulk_query_out);
idr_destroy(&fc_stats->counters_idr);
tmplist = llist_del_all(&fc_stats->addlist);

drivers/net/ethernet/mellanox/mlx5/core/main.c

@ -1217,8 +1217,10 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, bool cleanup)
{
int err = 0;
if (cleanup)
if (cleanup) {
mlx5_unregister_device(dev);
mlx5_drain_health_wq(dev);
}
mutex_lock(&dev->intf_state_mutex);
if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
@ -1369,7 +1371,6 @@ static void remove_one(struct pci_dev *pdev)
mlx5_crdump_disable(dev);
mlx5_devlink_unregister(devlink);
mlx5_unregister_device(dev);
if (mlx5_unload_one(dev, true)) {
mlx5_core_err(dev, "mlx5_unload_one failed\n");

drivers/net/ethernet/mellanox/mlx5/core/qp.c

@ -53,7 +53,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
common = radix_tree_lookup(&table->tree, rsn);
if (common)
atomic_inc(&common->refcount);
refcount_inc(&common->refcount);
spin_unlock_irqrestore(&table->lock, flags);
@ -62,7 +62,7 @@ mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn)
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common)
{
if (atomic_dec_and_test(&common->refcount))
if (refcount_dec_and_test(&common->refcount))
complete(&common->free);
}
@ -162,7 +162,7 @@ static int rsc_event_notifier(struct notifier_block *nb,
common = mlx5_get_rsc(table, rsn);
if (!common) {
mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn);
mlx5_core_dbg(dev, "Async event for unknown resource 0x%x\n", rsn);
return NOTIFY_OK;
}
@ -209,7 +209,7 @@ static int create_resource_common(struct mlx5_core_dev *dev,
if (err)
return err;
atomic_set(&qp->common.refcount, 1);
refcount_set(&qp->common.refcount, 1);
init_completion(&qp->common.free);
qp->pid = current->pid;

include/linux/mlx5/driver.h

@ -47,6 +47,7 @@
#include <linux/interrupt.h>
#include <linux/idr.h>
#include <linux/notifier.h>
#include <linux/refcount.h>
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@ -398,7 +399,7 @@ enum mlx5_res_type {
struct mlx5_core_rsc_common {
enum mlx5_res_type res;
atomic_t refcount;
refcount_t refcount;
struct completion free;
};
@ -488,6 +489,7 @@ struct mlx5_fc_stats {
struct delayed_work work;
unsigned long next_query;
unsigned long sampling_interval; /* jiffies */
u32 *bulk_query_out;
};
struct mlx5_events;

include/linux/mlx5/mlx5_ifc.h

@ -172,6 +172,8 @@ enum {
MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725,
MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726,
MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727,
MLX5_CMD_OP_RELEASE_XRQ_ERROR = 0x729,
MLX5_CMD_OP_MODIFY_XRQ = 0x72a,
MLX5_CMD_OP_QUERY_ESW_FUNCTIONS = 0x740,
MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750,
MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751,
@ -1040,6 +1042,21 @@ enum {
MLX5_UCTX_CAP_INTERNAL_DEV_RES = 1UL << 1,
};
#define MLX5_FC_BULK_SIZE_FACTOR 128
enum mlx5_fc_bulk_alloc_bitmask {
MLX5_FC_BULK_128 = (1 << 0),
MLX5_FC_BULK_256 = (1 << 1),
MLX5_FC_BULK_512 = (1 << 2),
MLX5_FC_BULK_1024 = (1 << 3),
MLX5_FC_BULK_2048 = (1 << 4),
MLX5_FC_BULK_4096 = (1 << 5),
MLX5_FC_BULK_8192 = (1 << 6),
MLX5_FC_BULK_16384 = (1 << 7),
};
#define MLX5_FC_BULK_NUM_FCS(fc_enum) (MLX5_FC_BULK_SIZE_FACTOR * (fc_enum))
struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_0[0x30];
u8 vhca_id[0x10];
@ -1244,7 +1261,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_2e0[0x7];
u8 max_qp_mcg[0x19];
u8 reserved_at_300[0x18];
u8 reserved_at_300[0x10];
u8 flow_counter_bulk_alloc[0x8];
u8 log_max_mcg[0x8];
u8 reserved_at_320[0x3];
@ -2766,7 +2784,7 @@ struct mlx5_ifc_traffic_counter_bits {
struct mlx5_ifc_tisc_bits {
u8 strict_lag_tx_port_affinity[0x1];
u8 tls_en[0x1];
u8 reserved_at_1[0x2];
u8 reserved_at_2[0x2];
u8 lag_tx_port_affinity[0x04];
u8 reserved_at_8[0x4];
@ -2941,6 +2959,13 @@ enum {
SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC = 0x3,
};
enum {
ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0,
ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1,
ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2,
ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3,
};
struct mlx5_ifc_scheduling_context_bits {
u8 element_type[0x8];
u8 reserved_at_8[0x18];
@ -7815,7 +7840,8 @@ struct mlx5_ifc_alloc_flow_counter_in_bits {
u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 reserved_at_40[0x40];
u8 reserved_at_40[0x38];
u8 flow_counter_bulk[0x8];
};
struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
@ -9568,8 +9594,6 @@ struct mlx5_ifc_query_lag_out_bits {
u8 syndrome[0x20];
u8 reserved_at_40[0x40];
struct mlx5_ifc_lagc_bits ctx;
};
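The new bulk-allocation bitmask encodes the bulk size as a power of two times MLX5_FC_BULK_SIZE_FACTOR (128), so MLX5_FC_BULK_NUM_FCS() turns an enum value straight into a counter count. A small standalone check of that arithmetic, using demo_* copies of the definitions above:

    #include <stdio.h>

    #define DEMO_FC_BULK_SIZE_FACTOR 128
    #define DEMO_FC_BULK_NUM_FCS(fc_enum) (DEMO_FC_BULK_SIZE_FACTOR * (fc_enum))

    enum demo_fc_bulk_alloc_bitmask {
            DEMO_FC_BULK_128   = 1 << 0,
            DEMO_FC_BULK_512   = 1 << 2,
            DEMO_FC_BULK_16384 = 1 << 7,
    };

    int main(void)
    {
            printf("%d\n", DEMO_FC_BULK_NUM_FCS(DEMO_FC_BULK_128));   /* 128 * 1   = 128   */
            printf("%d\n", DEMO_FC_BULK_NUM_FCS(DEMO_FC_BULK_512));   /* 128 * 4   = 512   */
            printf("%d\n", DEMO_FC_BULK_NUM_FCS(DEMO_FC_BULK_16384)); /* 128 * 128 = 16384 */
            return 0;
    }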