net/mlx5: E-Switch, Handle representors creation in handler context
Unify representor creation in the esw_functions_changed context handler. Emulate the esw_functions_changed event for FW/HW that does not support it.

Signed-off-by: Vu Pham <vuhuong@mellanox.com>
Reviewed-by: Parav Pandit <parav@mellanox.com>
Reviewed-by: Bodong Wang <bodong@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
commit ac35dcd6e4
parent b8a92577f4
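The change boils down to one dispatch pattern: VF representors are always loaded from the esw_functions_changed handler, and when FW/HW cannot raise that event the driver fires it itself at init time. Below is a minimal, self-contained plain-C sketch of that pattern only (not part of the patch); the names struct fake_esw, has_funcs_changed_event, load_vf_reps and offloads_init are illustrative placeholders, not the mlx5 API.

/* Standalone sketch of the dispatch this commit introduces: one handler
 * loads VF representors, whether the event comes from FW/HW or is
 * emulated by the driver during offloads init.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_esw {
	bool has_funcs_changed_event;	/* FW/HW can raise the event itself */
	int num_vfs;
};

static void load_vf_reps(struct fake_esw *esw)
{
	printf("loading %d VF representors\n", esw->num_vfs);
}

/* One handler serves both the real and the emulated event. */
static void funcs_changed_event(struct fake_esw *esw)
{
	if (esw->num_vfs)
		load_vf_reps(esw);
}

static void offloads_init(struct fake_esw *esw, int vf_nvports)
{
	if (esw->has_funcs_changed_event) {
		/* Real flow: FW reports the VF count later via the event. */
		esw->num_vfs = 0;
	} else {
		/* Emulated flow: remember the count so the handler can act now. */
		esw->num_vfs = vf_nvports;
	}
	funcs_changed_event(esw);	/* no-op until num_vfs is known */
}

int main(void)
{
	struct fake_esw old_fw = { .has_funcs_changed_event = false };
	struct fake_esw new_fw = { .has_funcs_changed_event = true };

	offloads_init(&old_fw, 4);	/* prints immediately (emulated event) */
	offloads_init(&new_fw, 4);	/* stays quiet until the FW event fires */
	return 0;
}

Run as written, only the device whose firmware lacks the event prints the load message; the other would load its VF representors when the (not modeled) FW event arrives, which mirrors how the patch defers VF rep creation to the handler in both cases.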
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1720,7 +1720,6 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 {
 	struct mlx5_vport *vport;
 	int total_nvports = 0;
-	u16 vf_nvports = 0;
 	int err;
 	int i, enabled_events;
 
@@ -1739,16 +1738,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode);
 
 	if (mode == SRIOV_OFFLOADS) {
-		if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
-			err = mlx5_esw_query_functions(esw->dev, &vf_nvports);
-			if (err)
-				return err;
+		if (mlx5_core_is_ecpf_esw_manager(esw->dev))
 			total_nvports = esw->total_vports;
-		} else {
-			vf_nvports = nvfs;
+		else
 			total_nvports = nvfs + MLX5_SPECIAL_VPORTS(esw->dev);
-		}
 	}
 
 	esw->mode = mode;
 
@@ -1761,7 +1755,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	} else {
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
 		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
-		err = esw_offloads_init(esw, vf_nvports, total_nvports);
+		err = esw_offloads_init(esw, nvfs, total_nvports);
 	}
 
 	if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1436,34 +1436,13 @@ static int esw_offloads_load_vf_reps(struct mlx5_eswitch *esw, int nvports)
 	return err;
 }
 
-static int __load_reps_all_vport(struct mlx5_eswitch *esw, int nvports,
-				 u8 rep_type)
-{
-	int err;
-
-	/* Special vports must be loaded first. */
-	err = __load_reps_special_vport(esw, rep_type);
-	if (err)
-		return err;
-
-	err = __load_reps_vf_vport(esw, nvports, rep_type);
-	if (err)
-		goto err_vfs;
-
-	return 0;
-
-err_vfs:
-	__unload_reps_special_vport(esw, rep_type);
-	return err;
-}
-
-static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
+static int esw_offloads_load_special_vport(struct mlx5_eswitch *esw)
 {
 	u8 rep_type = 0;
 	int err;
 
 	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
-		err = __load_reps_all_vport(esw, nvports, rep_type);
+		err = __load_reps_special_vport(esw, rep_type);
 		if (err)
 			goto err_reps;
 	}
@@ -1472,7 +1451,7 @@ static int esw_offloads_load_all_reps(struct mlx5_eswitch *esw, int nvports)
 
 err_reps:
 	while (rep_type-- > 0)
-		__unload_reps_all_vport(esw, nvports, rep_type);
+		__unload_reps_special_vport(esw, rep_type);
 	return err;
 }
 
@@ -1811,6 +1790,21 @@ static void esw_functions_changed_event_handler(struct work_struct *work)
 	kfree(host_work);
 }
 
+static void esw_emulate_event_handler(struct work_struct *work)
+{
+	struct mlx5_host_work *host_work =
+		container_of(work, struct mlx5_host_work, work);
+	struct mlx5_eswitch *esw = host_work->esw;
+	int err;
+
+	if (esw->esw_funcs.num_vfs) {
+		err = esw_offloads_load_vf_reps(esw, esw->esw_funcs.num_vfs);
+		if (err)
+			esw_warn(esw->dev, "Load vf reps err=%d\n", err);
+	}
+	kfree(host_work);
+}
+
 static int esw_functions_changed_event(struct notifier_block *nb,
 				       unsigned long type, void *data)
 {
@@ -1827,7 +1821,11 @@ static int esw_functions_changed_event(struct notifier_block *nb,
 
 	host_work->esw = esw;
 
-	INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
+	if (mlx5_eswitch_is_funcs_handler(esw->dev))
+		INIT_WORK(&host_work->work,
+			  esw_functions_changed_event_handler);
+	else
+		INIT_WORK(&host_work->work, esw_emulate_event_handler);
 	queue_work(esw->work_queue, &host_work->work);
 
 	return NOTIFY_OK;
@@ -1836,14 +1834,15 @@ static int esw_functions_changed_event(struct notifier_block *nb,
 static void esw_functions_changed_event_init(struct mlx5_eswitch *esw,
 					     u16 vf_nvports)
 {
-	if (!mlx5_eswitch_is_funcs_handler(esw->dev))
-		return;
-
-	MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
-		     ESW_FUNCTIONS_CHANGED);
-	mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
-	esw->esw_funcs.num_vfs = vf_nvports;
+	if (mlx5_eswitch_is_funcs_handler(esw->dev)) {
+		esw->esw_funcs.num_vfs = 0;
+		MLX5_NB_INIT(&esw->esw_funcs.nb, esw_functions_changed_event,
+			     ESW_FUNCTIONS_CHANGED);
+		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
+	} else {
+		esw->esw_funcs.num_vfs = vf_nvports;
+	}
 }
 
 static void esw_functions_changed_event_cleanup(struct mlx5_eswitch *esw)
 {
@@ -1863,7 +1862,11 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
 	if (err)
 		return err;
 
-	err = esw_offloads_load_all_reps(esw, vf_nvports);
+	/* Only load special vports reps. VF reps will be loaded in
+	 * context of functions_changed event handler through real
+	 * or emulated event.
+	 */
+	err = esw_offloads_load_special_vport(esw);
 	if (err)
 		goto err_reps;
 
@@ -1873,6 +1876,16 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int vf_nvports,
 
 	mlx5_rdma_enable_roce(esw->dev);
 
+	/* Call esw_functions_changed event to load VF reps:
+	 * 1. HW does not support the event then emulate it
+	 * Or
+	 * 2. The event was already notified when num_vfs changed
+	 *    and eswitch was in legacy mode
+	 */
+	esw_functions_changed_event(&esw->esw_funcs.nb.nb,
+				    MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED,
+				    NULL);
+
 	return 0;
 
 err_reps:
@@ -1901,18 +1914,10 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
 
 void esw_offloads_cleanup(struct mlx5_eswitch *esw)
 {
-	u16 num_vfs;
-
 	esw_functions_changed_event_cleanup(esw);
 
-	if (mlx5_eswitch_is_funcs_handler(esw->dev))
-		num_vfs = esw->esw_funcs.num_vfs;
-	else
-		num_vfs = esw->dev->priv.sriov.num_vfs;
-
 	mlx5_rdma_disable_roce(esw->dev);
 	esw_offloads_devcom_cleanup(esw);
-	esw_offloads_unload_all_reps(esw, num_vfs);
+	esw_offloads_unload_all_reps(esw, esw->esw_funcs.num_vfs);
 	esw_offloads_steering_cleanup(esw);
 }
 