Merge branch 'ib-guids' into rdma.git for-next
Danit Goldberg says:

====================
This series extends RTNETLINK to provide IB port and node GUIDs, which were
configured for InfiniBand VFs.

The functionality to set VF GUIDs has existed for a long time; here we add the
missing "get" so that netlink becomes symmetric and various cloud orchestration
tools can manage such VFs more naturally.

iproute2 was extended too, to present those GUIDs:
 - ip link show <device>

For example:
 - ip link set ib4 vf 0 node_guid 22:44:33:00:33:11:00:33
 - ip link set ib4 vf 0 port_guid 10:21:33:12:00:11:22:10
 - ip link show ib4
   ib4: <BROADCAST,MULTICAST> mtu 4092 qdisc noop state DOWN mode DEFAULT group default qlen 256
       link/infiniband 00:00:0a:2d:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:44:36:8d brd 00:ff:ff:ff:ff:12:40:1b:ff:ff:00:00:00:00:00:00:ff:ff:ff:ff
       vf 0 link/infiniband 00:00:0a:2d:fe:80:00:00:00:00:00:00:ec:0d:9a:03:00:44:36:8d brd 00:ff:ff:ff:ff:12:40:1b:ff:ff:00:00:00:00:00:00:ff:ff:ff:ff,
       spoof checking off, NODE_GUID 22:44:33:00:33:11:00:33, PORT_GUID 10:21:33:12:00:11:22:10, link-state disable, trust off, query_rss off
====================

Based on the mlx5-next branch from
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux for dependencies

* branch 'ib-guids': (35 commits)
  IB/mlx5: Implement callbacks for getting VFs GUID attributes
  IB/ipoib: Add ndo operation for getting VFs GUID attributes
  IB/core: Add interfaces to get VF node and port GUIDs
  net/core: Add support for getting VF GUIDs
  net/mlx5: Add new chain for netfilter flow table offload
  net/mlx5: Refactor creating fast path prio chains
  net/mlx5: Accumulate levels for chains prio namespaces
  net/mlx5: Define fdb tc levels per prio
  net/mlx5: Rename FDB_* tc related defines to FDB_TC_* defines
  net/mlx5: Simplify fdb chain and prio eswitch defines
  IB/mlx5: Load profile according to RoCE enablement state
  IB/mlx5: Rename profile and init methods
  net/mlx5: Handle "enable_roce" devlink param
  net/mlx5: Document flow_steering_mode devlink param
  devlink: Add new "enable_roce" generic device param
  net/mlx5: fix spelling mistake "metdata" -> "metadata"
  net/mlx5: fix kvfree of uninitialized pointer spec
  IB/mlx5: Introduce and use mlx5_core_is_vf()
  net/mlx5: E-switch, Enable metadata on own vport
  net/mlx5: Refactor ingress acl configuration
  ...

Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
This commit is contained in: commit 3694e41e41
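For orientation, the new "get" direction is a thin dispatch through
net_device_ops: when RTNETLINK fills VF info for "ip link show", it can query
the driver's ndo_get_vf_guid and emit the two IB GUID attributes. A minimal
sketch of that consumer, loosely modeled on the series' net/core patch (helper
names and exact layout here are illustrative, not verbatim from the tree):

    /* Sketch: emit VF GUIDs while filling IFLA_VF_INFO. Assumes the
     * pre-existing IFLA_VF_IB_NODE_GUID/IFLA_VF_IB_PORT_GUID uapi attributes. */
    static int rtnl_fill_vf_guid(struct sk_buff *skb, int type, u64 guid)
    {
            struct ifla_vf_guid ivf = { .guid = guid };

            return nla_put(skb, type, sizeof(ivf), &ivf);
    }

    static int fill_vf_guids(struct sk_buff *skb, struct net_device *dev, int vf)
    {
            const struct net_device_ops *ops = dev->netdev_ops;
            struct ifla_vf_guid node_guid = {}, port_guid = {};

            if (!ops->ndo_get_vf_guid ||
                ops->ndo_get_vf_guid(dev, vf, &node_guid, &port_guid))
                    return 0;       /* unsupported: simply omit the attributes */

            if (rtnl_fill_vf_guid(skb, IFLA_VF_IB_NODE_GUID, node_guid.guid) ||
                rtnl_fill_vf_guid(skb, IFLA_VF_IB_PORT_GUID, port_guid.guid))
                    return -EMSGSIZE;
            return 0;
    }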
@@ -154,6 +154,27 @@ User command examples:
       values:
          cmode runtime value smfs

+enable_roce: RoCE enablement state
+----------------------------------
+RoCE enablement state controls driver support for RoCE traffic.
+When RoCE is disabled, there is no gid table, only raw ethernet QPs are
+supported and traffic on the well known UDP RoCE port is handled as raw
+ethernet traffic.
+
+To change RoCE enablement state a user must change the driverinit cmode
+value and run devlink reload.
+
+User command examples:
+
+- Disable RoCE::
+
+    $ devlink dev param set pci/0000:06:00.0 name enable_roce value false cmode driverinit
+    $ devlink dev reload pci/0000:06:00.0
+
+- Read RoCE enablement state::
+
+    $ devlink dev param show pci/0000:06:00.0 name enable_roce
+      pci/0000:06:00.0:
+        name enable_roce type generic
+          values:
+            cmode driverinit value true
+
 Devlink health reporters
 ========================
@@ -0,0 +1,17 @@
+flow_steering_mode    [DEVICE, DRIVER-SPECIFIC]
+                      Controls the flow steering mode of the driver.
+                      Two modes are supported:
+                      1. 'dmfs' - Device managed flow steering.
+                      2. 'smfs' - Software/Driver managed flow steering.
+                      In DMFS mode, the HW steering entities are created and
+                      managed through the Firmware.
+                      In SMFS mode, the HW steering entities are created and
+                      managed by the driver directly into Hardware, without
+                      firmware intervention.
+                      Type: String
+                      Configuration mode: runtime
+
+enable_roce           [DEVICE, GENERIC]
+                      Enable handling of RoCE traffic in the device.
+                      Enabled by default.
+                      Configuration mode: driverinit
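Since flow_steering_mode is a runtime parameter, the mode can be flipped
without a devlink reload. A plausible session (PCI address illustrative):

    $ devlink dev param set pci/0000:06:00.0 name flow_steering_mode value "smfs" cmode runtime
    $ devlink dev param show pci/0000:06:00.0 name flow_steering_mode
      pci/0000:06:00.0:
        name flow_steering_mode type driver-specific
          values:
            cmode runtime value smfs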
@@ -65,3 +65,7 @@ reset_dev_on_drv_probe [DEVICE, GENERIC]
                       Reset only if device firmware can be found in the
                       filesystem.
                       Type: u8
+
+enable_roce           [DEVICE, GENERIC]
+                      Enable handling of RoCE traffic in the device.
+                      Type: Boolean
@@ -2631,6 +2631,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops)
        SET_DEVICE_OP(dev_ops, get_port_immutable);
        SET_DEVICE_OP(dev_ops, get_vector_affinity);
        SET_DEVICE_OP(dev_ops, get_vf_config);
+       SET_DEVICE_OP(dev_ops, get_vf_guid);
        SET_DEVICE_OP(dev_ops, get_vf_stats);
        SET_DEVICE_OP(dev_ops, init_port);
        SET_DEVICE_OP(dev_ops, invalidate_range);
@@ -2460,6 +2460,16 @@ int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
 }
 EXPORT_SYMBOL(ib_set_vf_guid);

+int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+                  struct ifla_vf_guid *node_guid,
+                  struct ifla_vf_guid *port_guid)
+{
+       if (!device->ops.get_vf_guid)
+               return -EOPNOTSUPP;
+
+       return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
+}
+EXPORT_SYMBOL(ib_get_vf_guid);
 /**
  * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
  * information) and set an appropriate memory region for registration.
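The helper above dispatches through a matching hook in struct ib_device_ops,
next to the existing VF callbacks. A sketch of the hook as the call sites imply
it (not the verbatim in-tree declaration):

    struct ib_device_ops {
            /* ... existing callbacks ... */
            int (*get_vf_config)(struct ib_device *device, int vf, u8 port,
                                 struct ifla_vf_info *ivf);
            int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
                               struct ifla_vf_guid *node_guid,
                               struct ifla_vf_guid *port_guid);
            int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
                                struct ifla_vf_stats *stats);
            /* ... */
    };

Providers that do not fill .get_vf_guid keep working: callers get -EOPNOTSUPP
and netlink simply omits the GUID attributes.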
@@ -35,7 +35,7 @@ mlx5_ib_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
        int vport_index;

        if (rep->vport == MLX5_VPORT_UPLINK)
-               profile = &uplink_rep_profile;
+               profile = &raw_eth_profile;
        else
                return mlx5_ib_set_vport_rep(dev, rep);
@@ -10,7 +10,7 @@
 #include "mlx5_ib.h"

 #ifdef CONFIG_MLX5_ESWITCH
-extern const struct mlx5_ib_profile uplink_rep_profile;
+extern const struct mlx5_ib_profile raw_eth_profile;

 u8 mlx5_ib_eswitch_mode(struct mlx5_eswitch *esw);
 struct mlx5_ib_dev *mlx5_ib_get_rep_ibdev(struct mlx5_eswitch *esw,
@@ -201,3 +201,27 @@ int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,

        return -EINVAL;
 }
+
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+                       struct ifla_vf_guid *node_guid,
+                       struct ifla_vf_guid *port_guid)
+{
+       struct mlx5_ib_dev *dev = to_mdev(device);
+       struct mlx5_core_dev *mdev = dev->mdev;
+       struct mlx5_hca_vport_context *rep;
+       int err;
+
+       rep = kzalloc(sizeof(*rep), GFP_KERNEL);
+       if (!rep)
+               return -ENOMEM;
+
+       err = mlx5_query_hca_vport_context(mdev, 1, 1, vf + 1, rep);
+       if (err)
+               goto ex;
+
+       port_guid->guid = rep->port_guid;
+       node_guid->guid = rep->node_guid;
+ex:
+       kfree(rep);
+       return err;
+}
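A note on the "vf + 1" above: in mlx5 HCA vport numbering the PF occupies
function 0, so VF n is queried as function n + 1. Illustrative only (this
helper is hypothetical, not part of the patch):

    /* vf 0 -> function 1, vf 1 -> function 2, ... (the PF is function 0) */
    static inline int mlx5_vf_to_hca_function(int vf)
    {
            return vf + 1;
    }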
@@ -1019,7 +1019,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        if (MLX5_CAP_GEN(mdev, cd))
                props->device_cap_flags |= IB_DEVICE_CROSS_CHANNEL;

-       if (!mlx5_core_is_pf(mdev))
+       if (mlx5_core_is_vf(mdev))
                props->device_cap_flags |= IB_DEVICE_VIRTUAL_FUNCTION;

        if (mlx5_ib_port_link_layer(ibdev, 1) ==
@@ -5141,7 +5141,6 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
        immutable->pkey_tbl_len = attr.pkey_tbl_len;
        immutable->gid_tbl_len = attr.gid_tbl_len;
        immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep);
-       if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
-               immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+       immutable->max_mad_size = IB_MGMT_MAD_SIZE;

        return 0;
@@ -5245,11 +5244,9 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
 {
        int err;

-       if (MLX5_CAP_GEN(dev->mdev, roce)) {
-               err = mlx5_nic_vport_enable_roce(dev->mdev);
-               if (err)
-                       return err;
-       }
+       err = mlx5_nic_vport_enable_roce(dev->mdev);
+       if (err)
+               return err;

        err = mlx5_eth_lag_init(dev);
        if (err)
@@ -5258,7 +5255,6 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
        return 0;

 err_disable_roce:
-       if (MLX5_CAP_GEN(dev->mdev, roce))
-               mlx5_nic_vport_disable_roce(dev->mdev);
+       mlx5_nic_vport_disable_roce(dev->mdev);

        return err;
@@ -5267,7 +5263,6 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
 {
        mlx5_eth_lag_cleanup(dev);
-       if (MLX5_CAP_GEN(dev->mdev, roce))
-               mlx5_nic_vport_disable_roce(dev->mdev);
+       mlx5_nic_vport_disable_roce(dev->mdev);
 }

@@ -6313,6 +6308,7 @@ static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = {

 static const struct ib_device_ops mlx5_ib_dev_sriov_ops = {
        .get_vf_config = mlx5_ib_get_vf_config,
+       .get_vf_guid = mlx5_ib_get_vf_guid,
        .get_vf_stats = mlx5_ib_get_vf_stats,
        .set_vf_guid = mlx5_ib_set_vf_guid,
        .set_vf_link_state = mlx5_ib_set_vf_link_state,
@@ -6442,7 +6438,7 @@ static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = {
        .query_port = mlx5_ib_rep_query_port,
 };

-static int mlx5_ib_stage_rep_non_default_cb(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev)
 {
        ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops);
        return 0;
@@ -6482,7 +6478,7 @@ static void mlx5_ib_stage_common_roce_cleanup(struct mlx5_ib_dev *dev)
        mlx5_remove_netdev_notifier(dev, port_num);
 }

-static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
+static int mlx5_ib_stage_raw_eth_roce_init(struct mlx5_ib_dev *dev)
 {
        struct mlx5_core_dev *mdev = dev->mdev;
        enum rdma_link_layer ll;
@@ -6498,7 +6494,7 @@ static int mlx5_ib_stage_rep_roce_init(struct mlx5_ib_dev *dev)
        return err;
 }

-static void mlx5_ib_stage_rep_roce_cleanup(struct mlx5_ib_dev *dev)
+static void mlx5_ib_stage_raw_eth_roce_cleanup(struct mlx5_ib_dev *dev)
 {
        mlx5_ib_stage_common_roce_cleanup(dev);
 }
@@ -6817,7 +6813,7 @@ static const struct mlx5_ib_profile pf_profile = {
                     mlx5_ib_stage_delay_drop_cleanup),
 };

-const struct mlx5_ib_profile uplink_rep_profile = {
+const struct mlx5_ib_profile raw_eth_profile = {
        STAGE_CREATE(MLX5_IB_STAGE_INIT,
                     mlx5_ib_stage_init_init,
                     mlx5_ib_stage_init_cleanup),
@@ -6828,11 +6824,11 @@ const struct mlx5_ib_profile uplink_rep_profile = {
                     mlx5_ib_stage_caps_init,
                     NULL),
        STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB,
-                    mlx5_ib_stage_rep_non_default_cb,
+                    mlx5_ib_stage_raw_eth_non_default_cb,
                     NULL),
        STAGE_CREATE(MLX5_IB_STAGE_ROCE,
-                    mlx5_ib_stage_rep_roce_init,
-                    mlx5_ib_stage_rep_roce_cleanup),
+                    mlx5_ib_stage_raw_eth_roce_init,
+                    mlx5_ib_stage_raw_eth_roce_cleanup),
        STAGE_CREATE(MLX5_IB_STAGE_SRQ,
                     mlx5_init_srq_table,
                     mlx5_cleanup_srq_table),
@@ -6908,6 +6904,7 @@ static void *mlx5_ib_add_slave_port(struct mlx5_core_dev *mdev)

 static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
+       const struct mlx5_ib_profile *profile;
        enum rdma_link_layer ll;
        struct mlx5_ib_dev *dev;
        int port_type_cap;
@@ -6943,7 +6940,12 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
        dev->mdev = mdev;
        dev->num_ports = num_ports;

-       return __mlx5_ib_add(dev, &pf_profile);
+       if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_is_roce_enabled(mdev))
+               profile = &raw_eth_profile;
+       else
+               profile = &pf_profile;
+
+       return __mlx5_ib_add(dev, profile);
 }

 static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
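mlx5_is_roce_enabled() in the last hunk comes from the mlx5-next dependency
series ("net/mlx5: Handle 'enable_roce' devlink param"). Roughly, it prefers
the devlink driverinit value and falls back to the HCA capability; a sketch
under that assumption, not a verbatim copy:

    /* Sketch: resolve the effective RoCE enablement state. */
    static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
    {
            union devlink_param_value val;
            int err;

            err = devlink_param_driverinit_value_get(priv_to_devlink(dev),
                            DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, &val);
            return err ? MLX5_CAP_GEN(dev, roce) : val.vbool;
    }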
@@ -1314,6 +1314,9 @@ int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
                              u8 port, int state);
 int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
                         u8 port, struct ifla_vf_stats *stats);
+int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+                       struct ifla_vf_guid *node_guid,
+                       struct ifla_vf_guid *port_guid);
 int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u8 port,
                        u64 guid, int type);
@@ -2019,6 +2019,15 @@ static int ipoib_set_vf_guid(struct net_device *dev, int vf, u64 guid, int type)
        return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
 }

+static int ipoib_get_vf_guid(struct net_device *dev, int vf,
+                            struct ifla_vf_guid *node_guid,
+                            struct ifla_vf_guid *port_guid)
+{
+       struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+       return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
+}
+
 static int ipoib_get_vf_stats(struct net_device *dev, int vf,
                              struct ifla_vf_stats *vf_stats)
 {
@@ -2045,6 +2054,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = {
        .ndo_set_vf_link_state   = ipoib_set_vf_link_state,
        .ndo_get_vf_config       = ipoib_get_vf_config,
        .ndo_get_vf_stats        = ipoib_get_vf_stats,
+       .ndo_get_vf_guid         = ipoib_get_vf_guid,
        .ndo_set_vf_guid         = ipoib_set_vf_guid,
        .ndo_set_mac_address     = ipoib_set_mac,
        .ndo_get_stats64         = ipoib_get_stats,
@@ -177,12 +177,29 @@ enum mlx5_devlink_param_id {
        MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
 };

+static int mlx5_devlink_enable_roce_validate(struct devlink *devlink, u32 id,
+                                            union devlink_param_value val,
+                                            struct netlink_ext_ack *extack)
+{
+       struct mlx5_core_dev *dev = devlink_priv(devlink);
+       bool new_state = val.vbool;
+
+       if (new_state && !MLX5_CAP_GEN(dev, roce)) {
+               NL_SET_ERR_MSG_MOD(extack, "Device doesn't support RoCE");
+               return -EOPNOTSUPP;
+       }
+
+       return 0;
+}
+
 static const struct devlink_param mlx5_devlink_params[] = {
        DEVLINK_PARAM_DRIVER(MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
                             "flow_steering_mode", DEVLINK_PARAM_TYPE_STRING,
                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                             mlx5_devlink_fs_mode_get, mlx5_devlink_fs_mode_set,
                             mlx5_devlink_fs_mode_validate),
+       DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+                             NULL, NULL, mlx5_devlink_enable_roce_validate),
 };

 static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
@@ -197,6 +214,11 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink)
        devlink_param_driverinit_value_set(devlink,
                                           MLX5_DEVLINK_PARAM_FLOW_STEERING_MODE,
                                           value);
+
+       value.vbool = MLX5_CAP_GEN(dev, roce);
+       devlink_param_driverinit_value_set(devlink,
+                                          DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+                                          value);
 }

 int mlx5_devlink_register(struct devlink *devlink, struct device *dev)
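DEVLINK_PARAM_GENERIC(ENABLE_ROCE, ...) pulls the id, name and type from
devlink's generic-parameter table, so the driver only supplies the supported
cmodes and callbacks. Schematically (a sketch of the expansion pattern, not
copied verbatim from include/net/devlink.h):

    #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate)  \
    {                                                                   \
            .id = DEVLINK_PARAM_GENERIC_ID_##_id,                       \
            .name = DEVLINK_PARAM_GENERIC_##_id##_NAME,                 \
            .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE,                 \
            .generic = true,                                            \
            .supported_cmodes = _cmodes,                                \
            .get = _get,                                                \
            .set = _set,                                                \
            .validate = _validate,                                      \
    }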
@@ -1074,7 +1074,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
        slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        slow_attr->split_count = 0;
-       slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+       slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;

        rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
        if (!IS_ERR(rule))
@@ -1091,7 +1091,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
        slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        slow_attr->split_count = 0;
-       slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+       slow_attr->dest_chain = FDB_TC_SLOW_PATH_CHAIN;
        mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
        flow_flag_clear(flow, SLOW);
 }
@@ -111,7 +111,8 @@ static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
 }

 /* E-Switch vport context HW commands */
-static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+                                         bool other_vport,
                                          void *in, int inlen)
 {
        u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};
@@ -119,17 +120,12 @@ static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
        MLX5_SET(modify_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
        MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-       MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
        return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
 }

-int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
-                                         void *in, int inlen)
-{
-       return modify_esw_vport_context_cmd(esw->dev, vport, in, inlen);
-}
-
-static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+                                        bool other_vport,
                                         void *out, int outlen)
 {
        u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
@@ -137,16 +133,10 @@ static int query_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
        MLX5_SET(query_esw_vport_context_in, in, opcode,
                 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
        MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
-       MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);
+       MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
        return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen);
 }

-int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
-                                        void *out, int outlen)
-{
-       return query_esw_vport_context_cmd(esw->dev, vport, out, outlen);
-}
-
 static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
                                  u16 vlan, u8 qos, u8 set_flags)
 {
@@ -179,7 +169,8 @@ static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
        MLX5_SET(modify_esw_vport_context_in, in,
                 field_select.vport_cvlan_insert, 1);

-       return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
+       return mlx5_eswitch_modify_esw_vport_context(dev, vport, true,
+                                                    in, sizeof(in));
 }

 /* E-Switch FDB */
@@ -452,6 +443,13 @@ static int esw_create_legacy_table(struct mlx5_eswitch *esw)
        return err;
 }

+static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
+{
+       esw_cleanup_vepa_rules(esw);
+       esw_destroy_legacy_fdb_table(esw);
+       esw_destroy_legacy_vepa_table(esw);
+}
+
 #define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
                                        MLX5_VPORT_MC_ADDR_CHANGE | \
                                        MLX5_VPORT_PROMISC_CHANGE)
@@ -464,15 +462,10 @@ static int esw_legacy_enable(struct mlx5_eswitch *esw)
        if (ret)
                return ret;

-       mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
-       return 0;
-}
-
-static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
-{
-       esw_cleanup_vepa_rules(esw);
-       esw_destroy_legacy_fdb_table(esw);
-       esw_destroy_legacy_vepa_table(esw);
+       ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
+       if (ret)
+               esw_destroy_legacy_table(esw);
+       return ret;
 }

 static void esw_legacy_disable(struct mlx5_eswitch *esw)
@@ -501,7 +494,7 @@ static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
        /* Skip mlx5_mpfs_add_mac for eswitch_managers,
         * it is already done by its netdev in mlx5e_execute_l2_action
         */
-       if (esw->manager_vport == vport)
+       if (mlx5_esw_is_manager_vport(esw, vport))
                goto fdb_add;

        err = mlx5_mpfs_add_mac(esw->dev, mac);
@@ -530,10 +523,10 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
        u16 vport = vaddr->vport;
        int err = 0;

-       /* Skip mlx5_mpfs_del_mac for eswitch managerss,
+       /* Skip mlx5_mpfs_del_mac for eswitch managers,
         * it is already done by its netdev in mlx5e_execute_l2_action
         */
-       if (!vaddr->mpfs || esw->manager_vport == vport)
+       if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
                goto fdb_del;

        err = mlx5_mpfs_del_mac(esw->dev, mac);
@@ -1040,14 +1033,15 @@ int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
 void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport)
 {
-       if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
+       if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
                mlx5_del_flow_rules(vport->egress.allowed_vlan);
+               vport->egress.allowed_vlan = NULL;
+       }

-       if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
-               mlx5_del_flow_rules(vport->egress.drop_rule);
-
-       vport->egress.allowed_vlan = NULL;
-       vport->egress.drop_rule = NULL;
+       if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_rule)) {
+               mlx5_del_flow_rules(vport->egress.legacy.drop_rule);
+               vport->egress.legacy.drop_rule = NULL;
+       }
 }

 void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
@@ -1067,57 +1061,21 @@
        vport->egress.acl = NULL;
 }

-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
-                                struct mlx5_vport *vport)
+static int
+esw_vport_create_legacy_ingress_acl_groups(struct mlx5_eswitch *esw,
+                                          struct mlx5_vport *vport)
 {
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_core_dev *dev = esw->dev;
-       struct mlx5_flow_namespace *root_ns;
-       struct mlx5_flow_table *acl;
        struct mlx5_flow_group *g;
        void *match_criteria;
        u32 *flow_group_in;
-       /* The ingress acl table contains 4 groups
-        * (2 active rules at the same time -
-        *      1 allow rule from one of the first 3 groups.
-        *      1 drop rule from the last group):
-        * 1)Allow untagged traffic with smac=original mac.
-        * 2)Allow untagged traffic.
-        * 3)Allow traffic with smac=original mac.
-        * 4)Drop all other traffic.
-        */
-       int table_size = 4;
-       int err = 0;
-
-       if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
-               return -EOPNOTSUPP;
-
-       if (!IS_ERR_OR_NULL(vport->ingress.acl))
-               return 0;
-
-       esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
-                 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
-
-       root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
-                       mlx5_eswitch_vport_num_to_index(esw, vport->vport));
-       if (!root_ns) {
-               esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n", vport->vport);
-               return -EOPNOTSUPP;
-       }
+       int err;

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

-       acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
-       if (IS_ERR(acl)) {
-               err = PTR_ERR(acl);
-               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
-                        vport->vport, err);
-               goto out;
-       }
-       vport->ingress.acl = acl;
-
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1127,14 +1085,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

-       g = mlx5_create_flow_group(acl, flow_group_in);
+       g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
-               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged spoofchk flow group, err(%d)\n",
+               esw_warn(dev, "vport[%d] ingress create untagged spoofchk flow group, err(%d)\n",
                         vport->vport, err);
-               goto out;
+               goto spoof_err;
        }
-       vport->ingress.allow_untagged_spoofchk_grp = g;
+       vport->ingress.legacy.allow_untagged_spoofchk_grp = g;

        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1142,14 +1100,14 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);

-       g = mlx5_create_flow_group(acl, flow_group_in);
+       g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
-               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress untagged flow group, err(%d)\n",
+               esw_warn(dev, "vport[%d] ingress create untagged flow group, err(%d)\n",
                         vport->vport, err);
-               goto out;
+               goto untagged_err;
        }
-       vport->ingress.allow_untagged_only_grp = g;
+       vport->ingress.legacy.allow_untagged_only_grp = g;

        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -1158,110 +1116,180 @@ int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 2);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 2);

-       g = mlx5_create_flow_group(acl, flow_group_in);
+       g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
-               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress spoofchk flow group, err(%d)\n",
+               esw_warn(dev, "vport[%d] ingress create spoofchk flow group, err(%d)\n",
                         vport->vport, err);
-               goto out;
+               goto allow_spoof_err;
        }
-       vport->ingress.allow_spoofchk_only_grp = g;
+       vport->ingress.legacy.allow_spoofchk_only_grp = g;

        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 3);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 3);

-       g = mlx5_create_flow_group(acl, flow_group_in);
+       g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
-               esw_warn(dev, "Failed to create E-Switch vport[%d] ingress drop flow group, err(%d)\n",
+               esw_warn(dev, "vport[%d] ingress create drop flow group, err(%d)\n",
                         vport->vport, err);
-               goto out;
+               goto drop_err;
        }
-       vport->ingress.drop_grp = g;
+       vport->ingress.legacy.drop_grp = g;
+       kvfree(flow_group_in);
+       return 0;

-out:
-       if (err) {
-               if (!IS_ERR_OR_NULL(vport->ingress.allow_spoofchk_only_grp))
-                       mlx5_destroy_flow_group(
-                                       vport->ingress.allow_spoofchk_only_grp);
-               if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_only_grp))
-                       mlx5_destroy_flow_group(
-                                       vport->ingress.allow_untagged_only_grp);
-               if (!IS_ERR_OR_NULL(vport->ingress.allow_untagged_spoofchk_grp))
-                       mlx5_destroy_flow_group(
-                               vport->ingress.allow_untagged_spoofchk_grp);
-               if (!IS_ERR_OR_NULL(vport->ingress.acl))
-                       mlx5_destroy_flow_table(vport->ingress.acl);
-       }
-
+drop_err:
+       if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_spoofchk_only_grp)) {
+               mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+               vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+       }
+allow_spoof_err:
+       if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_only_grp)) {
+               mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+               vport->ingress.legacy.allow_untagged_only_grp = NULL;
+       }
+untagged_err:
+       if (!IS_ERR_OR_NULL(vport->ingress.legacy.allow_untagged_spoofchk_grp)) {
+               mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+               vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+       }
+spoof_err:
        kvfree(flow_group_in);
        return err;
 }

+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+                                      struct mlx5_vport *vport, int table_size)
+{
+       struct mlx5_core_dev *dev = esw->dev;
+       struct mlx5_flow_namespace *root_ns;
+       struct mlx5_flow_table *acl;
+       int vport_index;
+       int err;
+
+       if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
+               return -EOPNOTSUPP;
+
+       esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
+                 vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));
+
+       vport_index = mlx5_eswitch_vport_num_to_index(esw, vport->vport);
+       root_ns = mlx5_get_flow_vport_acl_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
+                                                   vport_index);
+       if (!root_ns) {
+               esw_warn(dev, "Failed to get E-Switch ingress flow namespace for vport (%d)\n",
+                        vport->vport);
+               return -EOPNOTSUPP;
+       }
+
+       acl = mlx5_create_vport_flow_table(root_ns, 0, table_size, 0, vport->vport);
+       if (IS_ERR(acl)) {
+               err = PTR_ERR(acl);
+               esw_warn(dev, "vport[%d] ingress create flow Table, err(%d)\n",
+                        vport->vport, err);
+               return err;
+       }
+       vport->ingress.acl = acl;
+       return 0;
+}
+
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport)
+{
+       if (!vport->ingress.acl)
+               return;
+
+       mlx5_destroy_flow_table(vport->ingress.acl);
+       vport->ingress.acl = NULL;
+}
+
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport)
 {
-       if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
-               mlx5_del_flow_rules(vport->ingress.drop_rule);
-
-       if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
-               mlx5_del_flow_rules(vport->ingress.allow_rule);
-
-       vport->ingress.drop_rule = NULL;
-       vport->ingress.allow_rule = NULL;
-
-       esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+       if (vport->ingress.legacy.drop_rule) {
+               mlx5_del_flow_rules(vport->ingress.legacy.drop_rule);
+               vport->ingress.legacy.drop_rule = NULL;
+       }
+
+       if (vport->ingress.allow_rule) {
+               mlx5_del_flow_rules(vport->ingress.allow_rule);
+               vport->ingress.allow_rule = NULL;
+       }
 }

-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
-                                  struct mlx5_vport *vport)
+static void esw_vport_disable_legacy_ingress_acl(struct mlx5_eswitch *esw,
+                                                struct mlx5_vport *vport)
 {
-       if (IS_ERR_OR_NULL(vport->ingress.acl))
+       if (!vport->ingress.acl)
                return;

        esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

        esw_vport_cleanup_ingress_rules(esw, vport);
-       mlx5_destroy_flow_group(vport->ingress.allow_spoofchk_only_grp);
-       mlx5_destroy_flow_group(vport->ingress.allow_untagged_only_grp);
-       mlx5_destroy_flow_group(vport->ingress.allow_untagged_spoofchk_grp);
-       mlx5_destroy_flow_group(vport->ingress.drop_grp);
-       mlx5_destroy_flow_table(vport->ingress.acl);
-       vport->ingress.acl = NULL;
-       vport->ingress.drop_grp = NULL;
-       vport->ingress.allow_spoofchk_only_grp = NULL;
-       vport->ingress.allow_untagged_only_grp = NULL;
-       vport->ingress.allow_untagged_spoofchk_grp = NULL;
+       if (vport->ingress.legacy.allow_spoofchk_only_grp) {
+               mlx5_destroy_flow_group(vport->ingress.legacy.allow_spoofchk_only_grp);
+               vport->ingress.legacy.allow_spoofchk_only_grp = NULL;
+       }
+       if (vport->ingress.legacy.allow_untagged_only_grp) {
+               mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_only_grp);
+               vport->ingress.legacy.allow_untagged_only_grp = NULL;
+       }
+       if (vport->ingress.legacy.allow_untagged_spoofchk_grp) {
+               mlx5_destroy_flow_group(vport->ingress.legacy.allow_untagged_spoofchk_grp);
+               vport->ingress.legacy.allow_untagged_spoofchk_grp = NULL;
+       }
+       if (vport->ingress.legacy.drop_grp) {
+               mlx5_destroy_flow_group(vport->ingress.legacy.drop_grp);
+               vport->ingress.legacy.drop_grp = NULL;
+       }
+       esw_vport_destroy_ingress_acl_table(vport);
 }

 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport)
 {
-       struct mlx5_fc *counter = vport->ingress.drop_counter;
+       struct mlx5_fc *counter = vport->ingress.legacy.drop_counter;
        struct mlx5_flow_destination drop_ctr_dst = {0};
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_flow_act flow_act = {0};
-       struct mlx5_flow_spec *spec;
+       struct mlx5_flow_spec *spec = NULL;
        int dest_num = 0;
        int err = 0;
        u8 *smac_v;

+       /* The ingress acl table contains 4 groups
+        * (2 active rules at the same time -
+        *      1 allow rule from one of the first 3 groups.
+        *      1 drop rule from the last group):
+        * 1)Allow untagged traffic with smac=original mac.
+        * 2)Allow untagged traffic.
+        * 3)Allow traffic with smac=original mac.
+        * 4)Drop all other traffic.
+        */
+       int table_size = 4;
+
        esw_vport_cleanup_ingress_rules(esw, vport);

        if (!vport->info.vlan && !vport->info.qos && !vport->info.spoofchk) {
-               esw_vport_disable_ingress_acl(esw, vport);
+               esw_vport_disable_legacy_ingress_acl(esw, vport);
                return 0;
        }

-       err = esw_vport_enable_ingress_acl(esw, vport);
-       if (err) {
-               mlx5_core_warn(esw->dev,
-                              "failed to enable ingress acl (%d) on vport[%d]\n",
-                              err, vport->vport);
-               return err;
+       if (!vport->ingress.acl) {
+               err = esw_vport_create_ingress_acl_table(esw, vport, table_size);
+               if (err) {
+                       esw_warn(esw->dev,
+                                "vport[%d] enable ingress acl err (%d)\n",
+                                err, vport->vport);
+                       return err;
+               }
+
+               err = esw_vport_create_legacy_ingress_acl_groups(esw, vport);
+               if (err)
+                       goto out;
        }

        esw_debug(esw->dev,
                  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
                  vport->vport, vport->info.vlan, vport->info.qos);
@@ -1309,21 +1337,59 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
                dst = &drop_ctr_dst;
                dest_num++;
        }
-       vport->ingress.drop_rule =
+       vport->ingress.legacy.drop_rule =
                mlx5_add_flow_rules(vport->ingress.acl, spec,
                                    &flow_act, dst, dest_num);
-       if (IS_ERR(vport->ingress.drop_rule)) {
-               err = PTR_ERR(vport->ingress.drop_rule);
+       if (IS_ERR(vport->ingress.legacy.drop_rule)) {
+               err = PTR_ERR(vport->ingress.legacy.drop_rule);
                esw_warn(esw->dev,
                         "vport[%d] configure ingress drop rule, err(%d)\n",
                         vport->vport, err);
-               vport->ingress.drop_rule = NULL;
+               vport->ingress.legacy.drop_rule = NULL;
                goto out;
        }
+       kvfree(spec);
+       return 0;

 out:
-       if (err)
-               esw_vport_cleanup_ingress_rules(esw, vport);
+       esw_vport_disable_legacy_ingress_acl(esw, vport);
        kvfree(spec);
        return err;
 }

+int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
+                                         struct mlx5_vport *vport,
+                                         u16 vlan_id, u32 flow_action)
+{
+       struct mlx5_flow_act flow_act = {};
+       struct mlx5_flow_spec *spec;
+       int err = 0;
+
+       if (vport->egress.allowed_vlan)
+               return -EEXIST;
+
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               return -ENOMEM;
+
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
+       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
+       MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vlan_id);
+
+       spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+       flow_act.action = flow_action;
+       vport->egress.allowed_vlan =
+               mlx5_add_flow_rules(vport->egress.acl, spec,
+                                   &flow_act, NULL, 0);
+       if (IS_ERR(vport->egress.allowed_vlan)) {
+               err = PTR_ERR(vport->egress.allowed_vlan);
+               esw_warn(esw->dev,
+                        "vport[%d] configure egress vlan rule failed, err(%d)\n",
+                        vport->vport, err);
+               vport->egress.allowed_vlan = NULL;
+       }
+
+       kvfree(spec);
+       return err;
+}
@@ -1331,7 +1397,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                                   struct mlx5_vport *vport)
 {
-       struct mlx5_fc *counter = vport->egress.drop_counter;
+       struct mlx5_fc *counter = vport->egress.legacy.drop_counter;
        struct mlx5_flow_destination drop_ctr_dst = {0};
        struct mlx5_flow_destination *dst = NULL;
        struct mlx5_flow_act flow_act = {0};
@@ -1358,34 +1424,17 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
                  vport->vport, vport->info.vlan, vport->info.qos);

-       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-       if (!spec) {
-               err = -ENOMEM;
-               goto out;
-       }
-
        /* Allowed vlan rule */
-       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
-       MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
-       MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
-       MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
-
-       spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-       flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-       vport->egress.allowed_vlan =
-               mlx5_add_flow_rules(vport->egress.acl, spec,
-                                   &flow_act, NULL, 0);
-       if (IS_ERR(vport->egress.allowed_vlan)) {
-               err = PTR_ERR(vport->egress.allowed_vlan);
-               esw_warn(esw->dev,
-                        "vport[%d] configure egress allowed vlan rule failed, err(%d)\n",
-                        vport->vport, err);
-               vport->egress.allowed_vlan = NULL;
-               goto out;
-       }
+       err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, vport->info.vlan,
+                                                   MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+       if (err)
+               return err;

        /* Drop others rule (star rule) */
-       memset(spec, 0, sizeof(*spec));
+       spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
+       if (!spec)
+               goto out;
+
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;

        /* Attach egress drop flow counter */
@@ -1396,15 +1445,15 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
                dst = &drop_ctr_dst;
                dest_num++;
        }
-       vport->egress.drop_rule =
+       vport->egress.legacy.drop_rule =
                mlx5_add_flow_rules(vport->egress.acl, spec,
                                    &flow_act, dst, dest_num);
-       if (IS_ERR(vport->egress.drop_rule)) {
-               err = PTR_ERR(vport->egress.drop_rule);
+       if (IS_ERR(vport->egress.legacy.drop_rule)) {
+               err = PTR_ERR(vport->egress.legacy.drop_rule);
                esw_warn(esw->dev,
                         "vport[%d] configure egress drop rule failed, err(%d)\n",
                         vport->vport, err);
-               vport->egress.drop_rule = NULL;
+               vport->egress.legacy.drop_rule = NULL;
        }
 out:
        kvfree(spec);
@@ -1619,7 +1668,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
        u16 vport_num = vport->vport;
        int flags;

-       if (esw->manager_vport == vport_num)
+       if (mlx5_esw_is_manager_vport(esw, vport_num))
                return;

        mlx5_modify_vport_admin_state(esw->dev,
@@ -1639,66 +1688,112 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
                               SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
        modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan, vport->info.qos,
                               flags);
-
-       /* Only legacy mode needs ACLs */
-       if (esw->mode == MLX5_ESWITCH_LEGACY) {
-               esw_vport_ingress_config(esw, vport);
-               esw_vport_egress_config(esw, vport);
-       }
 }

-static void esw_vport_create_drop_counters(struct mlx5_vport *vport)
+static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
+                                             struct mlx5_vport *vport)
 {
-       struct mlx5_core_dev *dev = vport->dev;
+       int ret;

-       if (MLX5_CAP_ESW_INGRESS_ACL(dev, flow_counter)) {
-               vport->ingress.drop_counter = mlx5_fc_create(dev, false);
-               if (IS_ERR(vport->ingress.drop_counter)) {
-                       esw_warn(dev,
+       /* Only non manager vports need ACL in legacy mode */
+       if (mlx5_esw_is_manager_vport(esw, vport->vport))
+               return 0;
+
+       if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+           MLX5_CAP_ESW_INGRESS_ACL(esw->dev, flow_counter)) {
+               vport->ingress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+               if (IS_ERR(vport->ingress.legacy.drop_counter)) {
+                       esw_warn(esw->dev,
                                 "vport[%d] configure ingress drop rule counter failed\n",
                                 vport->vport);
-                       vport->ingress.drop_counter = NULL;
+                       vport->ingress.legacy.drop_counter = NULL;
                }
        }

-       if (MLX5_CAP_ESW_EGRESS_ACL(dev, flow_counter)) {
-               vport->egress.drop_counter = mlx5_fc_create(dev, false);
-               if (IS_ERR(vport->egress.drop_counter)) {
-                       esw_warn(dev,
+       ret = esw_vport_ingress_config(esw, vport);
+       if (ret)
+               goto ingress_err;
+
+       if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+           MLX5_CAP_ESW_EGRESS_ACL(esw->dev, flow_counter)) {
+               vport->egress.legacy.drop_counter = mlx5_fc_create(esw->dev, false);
+               if (IS_ERR(vport->egress.legacy.drop_counter)) {
+                       esw_warn(esw->dev,
                                 "vport[%d] configure egress drop rule counter failed\n",
                                 vport->vport);
-                       vport->egress.drop_counter = NULL;
+                       vport->egress.legacy.drop_counter = NULL;
                }
        }
-}

-static void esw_vport_destroy_drop_counters(struct mlx5_vport *vport)
+       ret = esw_vport_egress_config(esw, vport);
+       if (ret)
+               goto egress_err;
+
+       return 0;
+
+egress_err:
+       esw_vport_disable_legacy_ingress_acl(esw, vport);
+       mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+       vport->egress.legacy.drop_counter = NULL;
+
+ingress_err:
+       mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+       vport->ingress.legacy.drop_counter = NULL;
+       return ret;
+}
+
+static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
+                              struct mlx5_vport *vport)
 {
-       struct mlx5_core_dev *dev = vport->dev;
-
-       if (vport->ingress.drop_counter)
-               mlx5_fc_destroy(dev, vport->ingress.drop_counter);
-       if (vport->egress.drop_counter)
-               mlx5_fc_destroy(dev, vport->egress.drop_counter);
+       if (esw->mode == MLX5_ESWITCH_LEGACY)
+               return esw_vport_create_legacy_acl_tables(esw, vport);
+       else
+               return esw_vport_create_offloads_acl_tables(esw, vport);
 }

-static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
+static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
+                                               struct mlx5_vport *vport)
+{
+       if (mlx5_esw_is_manager_vport(esw, vport->vport))
+               return;
+
+       esw_vport_disable_egress_acl(esw, vport);
+       mlx5_fc_destroy(esw->dev, vport->egress.legacy.drop_counter);
+       vport->egress.legacy.drop_counter = NULL;
+
+       esw_vport_disable_legacy_ingress_acl(esw, vport);
+       mlx5_fc_destroy(esw->dev, vport->ingress.legacy.drop_counter);
+       vport->ingress.legacy.drop_counter = NULL;
+}
+
+static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
+                                 struct mlx5_vport *vport)
+{
+       if (esw->mode == MLX5_ESWITCH_LEGACY)
+               esw_vport_destroy_legacy_acl_tables(esw, vport);
+       else
+               esw_vport_destroy_offloads_acl_tables(esw, vport);
+}
+
+static int esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
                            enum mlx5_eswitch_vport_event enabled_events)
 {
        u16 vport_num = vport->vport;
+       int ret;

        mutex_lock(&esw->state_lock);
        WARN_ON(vport->enabled);

        esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

-       /* Create steering drop counters for ingress and egress ACLs */
-       if (vport_num && esw->mode == MLX5_ESWITCH_LEGACY)
-               esw_vport_create_drop_counters(vport);
-
        /* Restore old vport configuration */
        esw_apply_vport_conf(esw, vport);

+       ret = esw_vport_setup_acl(esw, vport);
+       if (ret)
+               goto done;
+
        /* Attach vport to the eswitch rate limiter */
        if (esw_vport_enable_qos(esw, vport, vport->info.max_rate,
                                 vport->qos.bw_share))
@@ -1711,7 +1806,7 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
        /* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
         * in smartNIC as it's a vport group manager.
         */
-       if (esw->manager_vport == vport_num ||
+       if (mlx5_esw_is_manager_vport(esw, vport_num) ||
            (!vport_num && mlx5_core_is_ecpf(esw->dev)))
                vport->info.trusted = true;

@@ -1719,7 +1814,9 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, struct mlx5_vport *vport,

        esw->enabled_vports++;
        esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
+done:
        mutex_unlock(&esw->state_lock);
+       return ret;
 }

 static void esw_disable_vport(struct mlx5_eswitch *esw,
@@ -1727,18 +1824,16 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
 {
        u16 vport_num = vport->vport;

+       mutex_lock(&esw->state_lock);
        if (!vport->enabled)
-               return;
+               goto done;

        esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
        /* Mark this vport as disabled to discard new events */
        vport->enabled = false;

-       /* Wait for current already scheduled events to complete */
-       flush_workqueue(esw->work_queue);
        /* Disable events from this vport */
        arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
-       mutex_lock(&esw->state_lock);
        /* We don't assume VFs will cleanup after themselves.
         * Calling vport change handler while vport is disabled will cleanup
         * the vport resources.
@@ -1746,17 +1841,18 @@ static void esw_disable_vport(struct mlx5_eswitch *esw,
        esw_vport_change_handle_locked(vport);
        vport->enabled_events = 0;
        esw_vport_disable_qos(esw, vport);
-       if (esw->manager_vport != vport_num &&
-           esw->mode == MLX5_ESWITCH_LEGACY) {
+
+       if (!mlx5_esw_is_manager_vport(esw, vport->vport) &&
+           esw->mode == MLX5_ESWITCH_LEGACY)
                mlx5_modify_vport_admin_state(esw->dev,
                                              MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
                                              vport_num, 1,
                                              MLX5_VPORT_ADMIN_STATE_DOWN);
-               esw_vport_disable_egress_acl(esw, vport);
-               esw_vport_disable_ingress_acl(esw, vport);
-               esw_vport_destroy_drop_counters(vport);
-       }
+
+       esw_vport_cleanup_acl(esw, vport);
        esw->enabled_vports--;
+
+done:
        mutex_unlock(&esw->state_lock);
 }
@@ -1770,12 +1866,8 @@ static int eswitch_vport_event(struct notifier_block *nb,

        vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
        vport = mlx5_eswitch_get_vport(esw, vport_num);
-       if (IS_ERR(vport))
-               return NOTIFY_OK;

-       if (vport->enabled)
+       if (!IS_ERR(vport))
                queue_work(esw->work_queue, &vport->vport_change_handler);

        return NOTIFY_OK;
 }
@@ -1837,26 +1929,51 @@ static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
 /* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs
  * whichever are present on the eswitch.
  */
-void
+int
 mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
                                 enum mlx5_eswitch_vport_event enabled_events)
 {
        struct mlx5_vport *vport;
+       int num_vfs;
+       int ret;
        int i;

        /* Enable PF vport */
        vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
-       esw_enable_vport(esw, vport, enabled_events);
+       ret = esw_enable_vport(esw, vport, enabled_events);
+       if (ret)
+               return ret;

-       /* Enable ECPF vports */
+       /* Enable ECPF vport */
        if (mlx5_ecpf_vport_exists(esw->dev)) {
                vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
-               esw_enable_vport(esw, vport, enabled_events);
+               ret = esw_enable_vport(esw, vport, enabled_events);
+               if (ret)
+                       goto ecpf_err;
        }

        /* Enable VF vports */
-       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
-               esw_enable_vport(esw, vport, enabled_events);
+       mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
+               ret = esw_enable_vport(esw, vport, enabled_events);
+               if (ret)
+                       goto vf_err;
+       }
+       return 0;
+
+vf_err:
+       num_vfs = i - 1;
+       mlx5_esw_for_each_vf_vport_reverse(esw, i, vport, num_vfs)
+               esw_disable_vport(esw, vport);
+
+       if (mlx5_ecpf_vport_exists(esw->dev)) {
+               vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
+               esw_disable_vport(esw, vport);
+       }
+
+ecpf_err:
+       vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
+       esw_disable_vport(esw, vport);
+       return ret;
 }

 /* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs
@@ -2474,12 +2591,12 @@ static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
        if (!vport->enabled || esw->mode != MLX5_ESWITCH_LEGACY)
                return 0;

-       if (vport->egress.drop_counter)
-               mlx5_fc_query(dev, vport->egress.drop_counter,
+       if (vport->egress.legacy.drop_counter)
+               mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
                              &stats->rx_dropped, &bytes);

-       if (vport->ingress.drop_counter)
-               mlx5_fc_query(dev, vport->ingress.drop_counter,
+       if (vport->ingress.legacy.drop_counter)
+               mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
                              &stats->tx_dropped, &bytes);

        if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
@@ -43,6 +43,16 @@
 #include <linux/mlx5/fs.h>
 #include "lib/mpfs.h"

+#define FDB_TC_MAX_CHAIN 3
+#define FDB_FT_CHAIN (FDB_TC_MAX_CHAIN + 1)
+#define FDB_TC_SLOW_PATH_CHAIN (FDB_FT_CHAIN + 1)
+
+/* The index of the last real chain (FT) + 1 as chain zero is valid as well */
+#define FDB_NUM_CHAINS (FDB_FT_CHAIN + 1)
+
+#define FDB_TC_MAX_PRIO 16
+#define FDB_TC_LEVELS_PER_PRIO 2
+
 #ifdef CONFIG_MLX5_ESWITCH

 #define MLX5_MAX_UC_PER_VPORT(dev) \
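Spelled out, the new defines pin down the chain layout: TC chains 0..3, then
one fast-path FT chain, then the slow-path chain. The implied values:

    /* Illustrative arithmetic only, derived from the defines above. */
    _Static_assert(FDB_FT_CHAIN == 4, "FT chain follows TC chains 0..3");
    _Static_assert(FDB_TC_SLOW_PATH_CHAIN == 5, "slow path is the last chain");
    _Static_assert(FDB_NUM_CHAINS == 5, "chains 0..4 are real chains");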
@@ -59,21 +69,22 @@
 #define mlx5_esw_has_fwd_fdb(dev) \
        MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table)

-#define FDB_MAX_CHAIN 3
-#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
-#define FDB_MAX_PRIO 16
-
 struct vport_ingress {
        struct mlx5_flow_table *acl;
-       struct mlx5_flow_group *allow_untagged_spoofchk_grp;
-       struct mlx5_flow_group *allow_spoofchk_only_grp;
-       struct mlx5_flow_group *allow_untagged_only_grp;
-       struct mlx5_flow_group *drop_grp;
-       struct mlx5_modify_hdr *modify_metadata;
-       struct mlx5_flow_handle *modify_metadata_rule;
-       struct mlx5_flow_handle *allow_rule;
-       struct mlx5_flow_handle *drop_rule;
-       struct mlx5_fc *drop_counter;
+       struct mlx5_flow_handle *allow_rule;
+       struct {
+               struct mlx5_flow_group *allow_spoofchk_only_grp;
+               struct mlx5_flow_group *allow_untagged_spoofchk_grp;
+               struct mlx5_flow_group *allow_untagged_only_grp;
+               struct mlx5_flow_group *drop_grp;
+               struct mlx5_flow_handle *drop_rule;
+               struct mlx5_fc *drop_counter;
+       } legacy;
+       struct {
+               struct mlx5_flow_group *metadata_grp;
+               struct mlx5_modify_hdr *modify_metadata;
+               struct mlx5_flow_handle *modify_metadata_rule;
+       } offloads;
 };
@@ -81,8 +92,10 @@ struct vport_egress {
        struct mlx5_flow_group *allowed_vlans_grp;
        struct mlx5_flow_group *drop_grp;
        struct mlx5_flow_handle *allowed_vlan;
-       struct mlx5_flow_handle *drop_rule;
-       struct mlx5_fc *drop_counter;
+       struct {
+               struct mlx5_flow_handle *drop_rule;
+               struct mlx5_fc *drop_counter;
+       } legacy;
 };

 struct mlx5_vport_drop_stats {
@@ -139,7 +152,6 @@ enum offloads_fdb_flags {

 extern const unsigned int ESW_POOLS[4];

-#define PRIO_LEVELS 2
 struct mlx5_eswitch_fdb {
        union {
                struct legacy_fdb {
@@ -166,7 +178,7 @@ struct mlx5_eswitch_fdb {
                struct {
                        struct mlx5_flow_table *fdb;
                        u32 num_rules;
-               } fdb_prio[FDB_MAX_CHAIN + 1][FDB_MAX_PRIO + 1][PRIO_LEVELS];
+               } fdb_prio[FDB_NUM_CHAINS][FDB_TC_MAX_PRIO + 1][FDB_TC_LEVELS_PER_PRIO];
                /* Protects fdb_prio table */
                struct mutex fdb_prio_lock;
@@ -217,8 +229,8 @@ enum {
 struct mlx5_eswitch {
        struct mlx5_core_dev *dev;
        struct mlx5_nb nb;
-       /* legacy data structures */
        struct mlx5_eswitch_fdb fdb_table;
+       /* legacy data structures */
        struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE];
        struct esw_mc_addr mc_promisc;
        /* end of legacy */
@@ -251,18 +263,16 @@ void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw);
 int esw_offloads_init_reps(struct mlx5_eswitch *esw);
 void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
                                     struct mlx5_vport *vport);
-int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
-                                struct mlx5_vport *vport);
+int esw_vport_create_ingress_acl_table(struct mlx5_eswitch *esw,
+                                      struct mlx5_vport *vport,
+                                      int table_size);
+void esw_vport_destroy_ingress_acl_table(struct mlx5_vport *vport);
 void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
                                    struct mlx5_vport *vport);
 int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
                                struct mlx5_vport *vport);
 void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
                                  struct mlx5_vport *vport);
-void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
-                                  struct mlx5_vport *vport);
 void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
                                               struct mlx5_vport *vport);
 int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
                               u32 rate_mbps);
@@ -292,9 +302,11 @@ int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
                                 struct ifla_vf_stats *vf_stats);
 void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule);

-int mlx5_eswitch_modify_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+                                         bool other_vport,
                                          void *in, int inlen);
-int mlx5_eswitch_query_esw_vport_context(struct mlx5_eswitch *esw, u16 vport,
+int mlx5_eswitch_query_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
+                                        bool other_vport,
                                         void *out, int outlen);

 struct mlx5_flow_spec;
@@ -421,6 +433,10 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
 int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
                                  u16 vport, u16 vlan, u8 qos, u8 set_flags);

+int mlx5_esw_create_vport_egress_acl_vlan(struct mlx5_eswitch *esw,
+                                         struct mlx5_vport *vport,
+                                         u16 vlan_id, u32 flow_action);
+
 static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev,
                                                       u8 vlan_depth)
 {
@@ -459,6 +475,12 @@ static inline u16 mlx5_eswitch_manager_vport(struct mlx5_core_dev *dev)
                MLX5_VPORT_ECPF : MLX5_VPORT_PF;
 }

+static inline bool
+mlx5_esw_is_manager_vport(const struct mlx5_eswitch *esw, u16 vport_num)
+{
+       return esw->manager_vport == vport_num;
+}
+
 static inline u16 mlx5_eswitch_first_host_vport_num(struct mlx5_core_dev *dev)
 {
        return mlx5_core_is_ecpf_esw_manager(dev) ?
@ -593,11 +615,18 @@ bool mlx5_eswitch_is_vf_vport(const struct mlx5_eswitch *esw, u16 vport_num);
|
|||
void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs);
|
||||
int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data);
|
||||
|
||||
void
|
||||
int
|
||||
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
|
||||
enum mlx5_eswitch_vport_event enabled_events);
|
||||
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw);
|
||||
|
||||
int
|
||||
esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
|
||||
struct mlx5_vport *vport);
|
||||
void
|
||||
esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
|
||||
struct mlx5_vport *vport);
|
||||
|
||||
#else /* CONFIG_MLX5_ESWITCH */
|
||||
/* eswitch API stubs */
|
||||
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }
|
||||
|
@ -613,10 +642,6 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
|
|||
|
||||
static inline void mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, const int num_vfs) {}
|
||||
|
||||
#define FDB_MAX_CHAIN 1
|
||||
#define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1)
|
||||
#define FDB_MAX_PRIO 1
|
||||
|
||||
#endif /* CONFIG_MLX5_ESWITCH */
|
||||
|
||||
#endif /* __MLX5_ESWITCH_H__ */
|
||||
|
|
|
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -75,7 +75,7 @@ bool mlx5_eswitch_prios_supported(struct mlx5_eswitch *esw)
 u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
 {
 	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
-		return FDB_MAX_CHAIN;
+		return FDB_TC_MAX_CHAIN;
 
 	return 0;
 }
@@ -83,7 +83,7 @@ u32 mlx5_eswitch_get_chain_range(struct mlx5_eswitch *esw)
 u16 mlx5_eswitch_get_prio_range(struct mlx5_eswitch *esw)
 {
 	if (esw->fdb_table.flags & ESW_FDB_CHAINS_AND_PRIOS_SUPPORTED)
-		return FDB_MAX_PRIO;
+		return FDB_TC_MAX_PRIO;
 
 	return 1;
 }
@@ -600,7 +600,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
 	if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
 		return 0;
 
-	err = mlx5_eswitch_query_esw_vport_context(esw, esw->manager_vport,
+	err = mlx5_eswitch_query_esw_vport_context(esw->dev, 0, false,
 						   out, sizeof(out));
 	if (err)
 		return err;
@@ -619,7 +619,7 @@ static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
 	MLX5_SET(modify_esw_vport_context_in, in,
 		 field_select.fdb_to_vport_reg_c_id, 1);
 
-	return mlx5_eswitch_modify_esw_vport_context(esw, esw->manager_vport,
+	return mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false,
 						     in, sizeof(in));
 }
 
@@ -928,7 +928,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
 	int table_prio, l = 0;
 	u32 flags = 0;
 
-	if (chain == FDB_SLOW_PATH_CHAIN)
+	if (chain == FDB_TC_SLOW_PATH_CHAIN)
 		return esw->fdb_table.offloads.slow_fdb;
 
 	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
@@ -953,7 +953,7 @@ esw_get_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
 		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
 			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
 
-	table_prio = (chain * FDB_MAX_PRIO) + prio - 1;
+	table_prio = prio - 1;
 
 	/* create earlier levels for correct fs_core lookup when
 	 * connecting tables
@@ -990,7 +990,7 @@ esw_put_prio_table(struct mlx5_eswitch *esw, u32 chain, u16 prio, int level)
 {
 	int l;
 
-	if (chain == FDB_SLOW_PATH_CHAIN)
+	if (chain == FDB_TC_SLOW_PATH_CHAIN)
 		return;
 
 	mutex_lock(&esw->fdb_table.offloads.fdb_prio_lock);
@@ -1778,9 +1778,9 @@ static int esw_vport_ingress_prio_tag_config(struct mlx5_eswitch *esw,
 	flow_act.vlan[0].vid = 0;
 	flow_act.vlan[0].prio = 0;
 
-	if (vport->ingress.modify_metadata_rule) {
+	if (vport->ingress.offloads.modify_metadata_rule) {
 		flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
-		flow_act.modify_hdr = vport->ingress.modify_metadata;
+		flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
 	}
 
 	vport->ingress.allow_rule =
@@ -1816,11 +1816,11 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
 	MLX5_SET(set_action_in, action, data,
 		 mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport));
 
-	vport->ingress.modify_metadata =
+	vport->ingress.offloads.modify_metadata =
 		mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS,
 					 1, action);
-	if (IS_ERR(vport->ingress.modify_metadata)) {
-		err = PTR_ERR(vport->ingress.modify_metadata);
+	if (IS_ERR(vport->ingress.offloads.modify_metadata)) {
+		err = PTR_ERR(vport->ingress.offloads.modify_metadata);
 		esw_warn(esw->dev,
 			 "failed to alloc modify header for vport %d ingress acl (%d)\n",
 			 vport->vport, err);
@@ -1828,99 +1828,75 @@ static int esw_vport_add_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
 	}
 
 	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-	flow_act.modify_hdr = vport->ingress.modify_metadata;
-	vport->ingress.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl,
+	flow_act.modify_hdr = vport->ingress.offloads.modify_metadata;
+	vport->ingress.offloads.modify_metadata_rule =
+				mlx5_add_flow_rules(vport->ingress.acl,
 						    &spec, &flow_act, NULL, 0);
-	if (IS_ERR(vport->ingress.modify_metadata_rule)) {
-		err = PTR_ERR(vport->ingress.modify_metadata_rule);
+	if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) {
+		err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule);
 		esw_warn(esw->dev,
 			 "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n",
 			 vport->vport, err);
-		vport->ingress.modify_metadata_rule = NULL;
+		vport->ingress.offloads.modify_metadata_rule = NULL;
 		goto out;
 	}
 
 out:
 	if (err)
-		mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
+		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
 	return err;
 }
 
-void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
-					       struct mlx5_vport *vport)
+static void esw_vport_del_ingress_acl_modify_metadata(struct mlx5_eswitch *esw,
+						      struct mlx5_vport *vport)
 {
-	if (vport->ingress.modify_metadata_rule) {
-		mlx5_del_flow_rules(vport->ingress.modify_metadata_rule);
-		mlx5_modify_header_dealloc(esw->dev, vport->ingress.modify_metadata);
+	if (vport->ingress.offloads.modify_metadata_rule) {
+		mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule);
+		mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
 
-		vport->ingress.modify_metadata_rule = NULL;
+		vport->ingress.offloads.modify_metadata_rule = NULL;
 	}
 }
 
-static int esw_vport_egress_prio_tag_config(struct mlx5_eswitch *esw,
-					    struct mlx5_vport *vport)
+static int esw_vport_create_ingress_acl_group(struct mlx5_eswitch *esw,
+					      struct mlx5_vport *vport)
 {
-	struct mlx5_flow_act flow_act = {0};
-	struct mlx5_flow_spec *spec;
-	int err = 0;
+	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+	struct mlx5_flow_group *g;
+	u32 *flow_group_in;
+	int ret = 0;
 
-	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
-		return 0;
+	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
+	if (!flow_group_in)
+		return -ENOMEM;
 
-	/* For prio tag mode, there is only 1 FTEs:
-	 * 1) prio tag packets - pop the prio tag VLAN, allow
-	 * Unmatched traffic is allowed by default
-	 */
-
-	esw_vport_cleanup_egress_rules(esw, vport);
-
-	err = esw_vport_enable_egress_acl(esw, vport);
-	if (err) {
-		mlx5_core_warn(esw->dev,
-			       "failed to enable egress acl (%d) on vport[%d]\n",
-			       err, vport->vport);
-		return err;
-	}
-
-	esw_debug(esw->dev,
-		  "vport[%d] configure prio tag egress rules\n", vport->vport);
-
-	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
-	if (!spec) {
-		err = -ENOMEM;
-		goto out_no_mem;
-	}
-
-	/* prio tag vlan rule - pop it so VF receives untagged packets */
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
-	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
-	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, 0);
-
-	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
-	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
-			  MLX5_FLOW_CONTEXT_ACTION_ALLOW;
-	vport->egress.allowed_vlan =
-		mlx5_add_flow_rules(vport->egress.acl, spec,
-				    &flow_act, NULL, 0);
-	if (IS_ERR(vport->egress.allowed_vlan)) {
-		err = PTR_ERR(vport->egress.allowed_vlan);
+	memset(flow_group_in, 0, inlen);
+	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
+	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
+
+	g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in);
+	if (IS_ERR(g)) {
+		ret = PTR_ERR(g);
 		esw_warn(esw->dev,
-			 "vport[%d] configure egress pop prio tag vlan rule failed, err(%d)\n",
-			 vport->vport, err);
-		vport->egress.allowed_vlan = NULL;
-		goto out;
+			 "Failed to create vport[%d] ingress metadata group, err(%d)\n",
+			 vport->vport, ret);
+		goto grp_err;
 	}
+	vport->ingress.offloads.metadata_grp = g;
+grp_err:
+	kvfree(flow_group_in);
+	return ret;
+}
 
-out:
-	kvfree(spec);
-out_no_mem:
-	if (err)
-		esw_vport_cleanup_egress_rules(esw, vport);
-	return err;
+static void esw_vport_destroy_ingress_acl_group(struct mlx5_vport *vport)
+{
+	if (vport->ingress.offloads.metadata_grp) {
+		mlx5_destroy_flow_group(vport->ingress.offloads.metadata_grp);
+		vport->ingress.offloads.metadata_grp = NULL;
+	}
 }
 
-static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
-					   struct mlx5_vport *vport)
+static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
+				    struct mlx5_vport *vport)
 {
 	int err;
@@ -1930,8 +1906,7 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
 		return 0;
 
 	esw_vport_cleanup_ingress_rules(esw, vport);
-
-	err = esw_vport_enable_ingress_acl(esw, vport);
+	err = esw_vport_create_ingress_acl_table(esw, vport, 1);
 	if (err) {
 		esw_warn(esw->dev,
 			 "failed to enable ingress acl (%d) on vport[%d]\n",
@@ -1939,25 +1914,65 @@ static int esw_vport_ingress_common_config(struct mlx5_eswitch *esw,
 		return err;
 	}
 
+	err = esw_vport_create_ingress_acl_group(esw, vport);
+	if (err)
+		goto group_err;
+
 	esw_debug(esw->dev,
 		  "vport[%d] configure ingress rules\n", vport->vport);
 
 	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
 		err = esw_vport_add_ingress_acl_modify_metadata(esw, vport);
 		if (err)
-			goto out;
+			goto metadata_err;
 	}
 
 	if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
 	    mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
 		err = esw_vport_ingress_prio_tag_config(esw, vport);
 		if (err)
-			goto out;
+			goto prio_tag_err;
 	}
+	return 0;
+
+prio_tag_err:
+	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+metadata_err:
+	esw_vport_cleanup_ingress_rules(esw, vport);
+	esw_vport_destroy_ingress_acl_group(vport);
+group_err:
+	esw_vport_destroy_ingress_acl_table(vport);
+	return err;
+}
 
-out:
+static int esw_vport_egress_config(struct mlx5_eswitch *esw,
+				   struct mlx5_vport *vport)
+{
+	int err;
+
+	if (!MLX5_CAP_GEN(esw->dev, prio_tag_required))
+		return 0;
+
+	esw_vport_cleanup_egress_rules(esw, vport);
+
+	err = esw_vport_enable_egress_acl(esw, vport);
 	if (err)
-		esw_vport_disable_ingress_acl(esw, vport);
+		return err;
+
+	/* For prio tag mode, there is only 1 FTE:
+	 * 1) prio tag packets - pop the prio tag VLAN, allow
+	 * Unmatched traffic is allowed by default
+	 */
+	esw_debug(esw->dev,
+		  "vport[%d] configure prio tag egress rules\n", vport->vport);
+
+	/* prio tag vlan rule - pop it so VF receives untagged packets */
+	err = mlx5_esw_create_vport_egress_acl_vlan(esw, vport, 0,
+						    MLX5_FLOW_CONTEXT_ACTION_VLAN_POP |
+						    MLX5_FLOW_CONTEXT_ACTION_ALLOW);
+	if (err)
+		esw_vport_disable_egress_acl(esw, vport);
+
 	return err;
 }
 
@@ -1981,54 +1996,59 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
 	return true;
 }
 
-static int esw_create_offloads_acl_tables(struct mlx5_eswitch *esw)
+int
+esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
+				     struct mlx5_vport *vport)
+{
+	int err;
+
+	err = esw_vport_ingress_config(esw, vport);
+	if (err)
+		return err;
+
+	if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
+		err = esw_vport_egress_config(esw, vport);
+		if (err) {
+			esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+			esw_vport_cleanup_ingress_rules(esw, vport);
+			esw_vport_destroy_ingress_acl_table(vport);
+		}
+	}
+	return err;
+}
+
+void
+esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
+				      struct mlx5_vport *vport)
+{
+	esw_vport_disable_egress_acl(esw, vport);
+	esw_vport_del_ingress_acl_modify_metadata(esw, vport);
+	esw_vport_cleanup_ingress_rules(esw, vport);
+	esw_vport_destroy_ingress_acl_group(vport);
+	esw_vport_destroy_ingress_acl_table(vport);
+}
+
+static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
 {
 	struct mlx5_vport *vport;
-	int i, j;
 	int err;
 
 	if (esw_check_vport_match_metadata_supported(esw))
 		esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
 
-	mlx5_esw_for_all_vports(esw, i, vport) {
-		err = esw_vport_ingress_common_config(esw, vport);
-		if (err)
-			goto err_ingress;
-
-		if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
-			err = esw_vport_egress_prio_tag_config(esw, vport);
-			if (err)
-				goto err_egress;
-		}
-	}
+	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+	err = esw_vport_create_offloads_acl_tables(esw, vport);
+	if (err)
+		return err;
 
 	if (mlx5_eswitch_vport_match_metadata_enabled(esw))
 		esw_info(esw->dev, "Use metadata reg_c as source vport to match\n");
 
 	return 0;
-
-err_egress:
-	esw_vport_disable_ingress_acl(esw, vport);
-err_ingress:
-	for (j = MLX5_VPORT_PF; j < i; j++) {
-		vport = &esw->vports[j];
-		esw_vport_disable_egress_acl(esw, vport);
-		esw_vport_disable_ingress_acl(esw, vport);
-	}
-
-	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
-	return err;
 }
 
-static void esw_destroy_offloads_acl_tables(struct mlx5_eswitch *esw)
+static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
 {
 	struct mlx5_vport *vport;
-	int i;
 
-	mlx5_esw_for_all_vports(esw, i, vport) {
-		esw_vport_disable_egress_acl(esw, vport);
-		esw_vport_disable_ingress_acl(esw, vport);
-	}
-
+	vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
+	esw_vport_destroy_offloads_acl_tables(esw, vport);
 	esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
 }
 
@@ -2046,7 +2066,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 	memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
 	mutex_init(&esw->fdb_table.offloads.fdb_prio_lock);
 
-	err = esw_create_offloads_acl_tables(esw);
+	err = esw_create_uplink_offloads_acl_tables(esw);
 	if (err)
 		return err;
 
@@ -2071,7 +2091,7 @@ static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
 	esw_destroy_offloads_fdb_tables(esw);
 
 create_fdb_err:
-	esw_destroy_offloads_acl_tables(esw);
+	esw_destroy_uplink_offloads_acl_tables(esw);
 
 	return err;
 }
@@ -2081,7 +2101,7 @@ static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
 	esw_destroy_vport_rx_group(esw);
 	esw_destroy_offloads_table(esw);
 	esw_destroy_offloads_fdb_tables(esw);
-	esw_destroy_offloads_acl_tables(esw);
+	esw_destroy_uplink_offloads_acl_tables(esw);
 }
 
 static void
@@ -2170,7 +2190,9 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 	if (err)
 		goto err_vport_metadata;
 
-	mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+	err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
+	if (err)
+		goto err_vports;
 
 	err = esw_offloads_load_all_reps(esw);
 	if (err)
@@ -2183,6 +2205,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw)
 
 err_reps:
 	mlx5_eswitch_disable_pf_vf_vports(esw);
+err_vports:
 	esw_set_passing_vport_metadata(esw, false);
 err_vport_metadata:
 	esw_offloads_steering_cleanup(esw);
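The reworked esw_vport_ingress_config() above tears down in exact reverse order of setup: table, then group, then rules. A distilled, self-contained sketch of that goto-unwind pattern follows; the step names are hypothetical stand-ins for the esw_vport_* calls, not driver code:

/* Sketch of the unwind ladder used by esw_vport_ingress_config():
 * each label undoes only the steps that had already succeeded.
 */
static int create_table(void)   { return 0; } /* stand-in for esw_vport_create_ingress_acl_table() */
static int create_group(void)   { return 0; } /* stand-in for esw_vport_create_ingress_acl_group() */
static int add_rules(void)      { return 0; } /* stand-in for the metadata/prio-tag rules */
static void destroy_group(void) { }
static void destroy_table(void) { }

static int vport_setup_sketch(void)
{
	int err;

	err = create_table();
	if (err)
		return err;

	err = create_group();
	if (err)
		goto group_err;

	err = add_rules();
	if (err)
		goto rules_err;

	return 0;

rules_err:
	destroy_group();
group_err:
	destroy_table();
	return err;
}

A failure at any step therefore leaves the vport with no half-initialized ACL state, which is what lets esw_vport_create_offloads_acl_tables() simply propagate the error.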
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -2359,9 +2359,17 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level)
 	int acc_level_ns = acc_level;
 
 	prio->start_level = acc_level;
-	fs_for_each_ns(ns, prio)
+	fs_for_each_ns(ns, prio) {
+		/* This updates start_level and num_levels of ns's priority descendants */
 		acc_level_ns = set_prio_attrs_in_ns(ns, acc_level);
 
+		/* If this is a prio with chains, and we can jump from one chain
+		 * (namespace) to another, we accumulate the levels
+		 */
+		if (prio->node.type == FS_TYPE_PRIO_CHAINS)
+			acc_level = acc_level_ns;
+	}
+
 	if (!prio->num_levels)
 		prio->num_levels = acc_level_ns - prio->start_level;
+	WARN_ON(prio->num_levels < acc_level_ns - prio->start_level);
@@ -2550,58 +2558,109 @@ static int init_rdma_rx_root_ns(struct mlx5_flow_steering *steering)
 	steering->rdma_rx_root_ns = NULL;
 	return err;
 }
-static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+
+/* FT and tc chains are stored in the same array so we can re-use the
+ * mlx5_get_fdb_sub_ns() and tc api for FT chains.
+ * When creating a new ns for each chain store it in the first available slot.
+ * Assume tc chains are created and stored first and only then the FT chain.
+ */
+static void store_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+					struct mlx5_flow_namespace *ns)
+{
+	int chain = 0;
+
+	while (steering->fdb_sub_ns[chain])
+		++chain;
+
+	steering->fdb_sub_ns[chain] = ns;
+}
+
+static int create_fdb_sub_ns_prio_chain(struct mlx5_flow_steering *steering,
+					struct fs_prio *maj_prio)
 {
 	struct mlx5_flow_namespace *ns;
-	struct fs_prio *maj_prio;
 	struct fs_prio *min_prio;
 	int prio;
 
+	ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
+	if (IS_ERR(ns))
+		return PTR_ERR(ns);
+
+	for (prio = 0; prio < FDB_TC_MAX_PRIO; prio++) {
+		min_prio = fs_create_prio(ns, prio, FDB_TC_LEVELS_PER_PRIO);
+		if (IS_ERR(min_prio))
+			return PTR_ERR(min_prio);
+	}
+
+	store_fdb_sub_ns_prio_chain(steering, ns);
+
+	return 0;
+}
+
+static int create_fdb_chains(struct mlx5_flow_steering *steering,
+			     int fs_prio,
+			     int chains)
+{
+	struct fs_prio *maj_prio;
+	int levels;
+	int chain;
-	int prio;
+	int err;
+
+	levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
+	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
+					  fs_prio,
+					  levels);
+	if (IS_ERR(maj_prio))
+		return PTR_ERR(maj_prio);
+
+	for (chain = 0; chain < chains; chain++) {
+		err = create_fdb_sub_ns_prio_chain(steering, maj_prio);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+static int create_fdb_fast_path(struct mlx5_flow_steering *steering)
+{
+	int err;
+
+	steering->fdb_sub_ns = kcalloc(FDB_NUM_CHAINS,
+				       sizeof(*steering->fdb_sub_ns),
+				       GFP_KERNEL);
+	if (!steering->fdb_sub_ns)
+		return -ENOMEM;
+
+	err = create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1);
+	if (err)
+		return err;
+
+	err = create_fdb_chains(steering, FDB_FT_OFFLOAD, 1);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+static int init_fdb_root_ns(struct mlx5_flow_steering *steering)
+{
+	struct fs_prio *maj_prio;
 	int err;
 
 	steering->fdb_root_ns = create_root_ns(steering, FS_FT_FDB);
 	if (!steering->fdb_root_ns)
 		return -ENOMEM;
 
-	steering->fdb_sub_ns = kzalloc(sizeof(steering->fdb_sub_ns) *
-				       (FDB_MAX_CHAIN + 1), GFP_KERNEL);
-	if (!steering->fdb_sub_ns)
-		return -ENOMEM;
-
 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_BYPASS_PATH,
 				  1);
 	if (IS_ERR(maj_prio)) {
 		err = PTR_ERR(maj_prio);
 		goto out_err;
 	}
-
-	levels = 2 * FDB_MAX_PRIO * (FDB_MAX_CHAIN + 1);
-	maj_prio = fs_create_prio_chained(&steering->fdb_root_ns->ns,
-					  FDB_FAST_PATH,
-					  levels);
-	if (IS_ERR(maj_prio)) {
-		err = PTR_ERR(maj_prio);
+	err = create_fdb_fast_path(steering);
+	if (err)
 		goto out_err;
-	}
-
-	for (chain = 0; chain <= FDB_MAX_CHAIN; chain++) {
-		ns = fs_create_namespace(maj_prio, MLX5_FLOW_TABLE_MISS_ACTION_DEF);
-		if (IS_ERR(ns)) {
-			err = PTR_ERR(ns);
-			goto out_err;
-		}
-
-		for (prio = 0; prio < FDB_MAX_PRIO * (chain + 1); prio++) {
-			min_prio = fs_create_prio(ns, prio, 2);
-			if (IS_ERR(min_prio)) {
-				err = PTR_ERR(min_prio);
-				goto out_err;
-			}
-		}
-
-		steering->fdb_sub_ns[chain] = ns;
-	}
 
 	maj_prio = fs_create_prio(&steering->fdb_root_ns->ns, FDB_SLOW_PATH, 1);
 	if (IS_ERR(maj_prio)) {
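The level budget that create_fdb_chains() hands to fs_create_prio_chained() is simply levels-per-prio × prios-per-chain × chains, and the set_prio_attrs_in_prio() change above is what lets chained namespaces share that accumulated budget. A self-contained sketch of the arithmetic follows; the two numeric defines are assumptions for illustration only (the authoritative values live in eswitch.h):

#include <stdio.h>

#define FDB_TC_LEVELS_PER_PRIO 2	/* assumed value */
#define FDB_TC_MAX_PRIO 16		/* assumed value */

static int fdb_chain_levels(int chains)
{
	/* mirrors: levels = FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains */
	return FDB_TC_LEVELS_PER_PRIO * FDB_TC_MAX_PRIO * chains;
}

int main(void)
{
	/* create_fdb_chains(steering, FDB_TC_OFFLOAD, FDB_TC_MAX_CHAIN + 1) */
	printf("tc offload prio, assuming 4 tc chains: %d levels\n",
	       fdb_chain_levels(4));
	/* create_fdb_chains(steering, FDB_FT_OFFLOAD, 1) */
	printf("ft offload prio, 1 chain: %d levels\n",
	       fdb_chain_levels(1));
	return 0;
}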
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -1117,6 +1117,11 @@ static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
 	return dev->coredev_type == MLX5_COREDEV_PF;
 }
 
+static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
+{
+	return dev->coredev_type == MLX5_COREDEV_VF;
+}
+
 static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
 {
 	return dev->caps.embedded_cpu;
@@ -1182,4 +1187,15 @@ enum {
 	MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
 };
 
+static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
+{
+	struct devlink *devlink = priv_to_devlink(dev);
+	union devlink_param_value val;
+
+	devlink_param_driverinit_value_get(devlink,
+					   DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+					   &val);
+	return val.vbool;
+}
+
 #endif /* MLX5_DRIVER_H */
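mlx5_is_roce_enabled() reduces the devlink driverinit lookup to a one-line predicate for callers. A minimal sketch of the intended kind of call site, in the spirit of the "IB/mlx5: Load profile according to RoCE enablement state" commit in this series; the profile identifiers are illustrative stand-ins, not necessarily the driver's exact symbols:

/* Sketch only: choose an IB profile from the "enable_roce" devlink value. */
static const struct mlx5_ib_profile *
mlx5_ib_choose_profile(struct mlx5_core_dev *mdev)
{
	if (mlx5_is_roce_enabled(mdev))
		return &pf_profile;	/* assumed name: full profile with RoCE */
	return &raw_eth_profile;	/* assumed name: raw ethernet QPs only */
}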
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -80,7 +80,8 @@ enum mlx5_flow_namespace_type {
 
 enum {
 	FDB_BYPASS_PATH,
-	FDB_FAST_PATH,
+	FDB_TC_OFFLOAD,
+	FDB_FT_OFFLOAD,
 	FDB_SLOW_PATH,
 };
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1316,6 +1316,10 @@ struct net_device_ops {
 						   struct nlattr *port[]);
 	int			(*ndo_get_vf_port)(struct net_device *dev,
 						   int vf, struct sk_buff *skb);
+	int			(*ndo_get_vf_guid)(struct net_device *dev,
+						   int vf,
+						   struct ifla_vf_guid *node_guid,
+						   struct ifla_vf_guid *port_guid);
 	int			(*ndo_set_vf_guid)(struct net_device *dev,
 						   int vf, u64 guid,
 						   int guid_type);
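A netdev that fronts an IB device is expected to implement the new ndo by delegating to the IB core helper declared later in this diff. A sketch along the lines of the "IB/ipoib: Add ndo operation for getting VFs GUID attributes" commit; treat the priv/ca/port field names as assumptions rather than verbatim ipoib code:

/* Sketch: translate the netdev-level ndo into the IB core call. */
static int ipoib_get_vf_guid(struct net_device *dev, int vf,
			     struct ifla_vf_guid *node_guid,
			     struct ifla_vf_guid *port_guid)
{
	struct ipoib_dev_priv *priv = ipoib_priv(dev);

	return ib_get_vf_guid(priv->ca, vf, priv->port, node_guid, port_guid);
}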
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -400,6 +400,7 @@ enum devlink_param_generic_id {
 	DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
 	DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
 	DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
+	DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
 
 	/* add new param generic ids above here*/
 	__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -434,6 +435,9 @@ enum devlink_param_generic_id {
 	"reset_dev_on_drv_probe"
 #define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE DEVLINK_PARAM_TYPE_U8
 
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL
+
 #define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate)	\
 {									\
 	.id = DEVLINK_PARAM_GENERIC_ID_##_id,				\
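With the generic id, name string and type in place, a driver opts in through its devlink_param array using the DEVLINK_PARAM_GENERIC() macro shown above, then registers the array with devlink_params_register() and seeds the driverinit default with devlink_param_driverinit_value_set(). A sketch; the validate callback name is hypothetical:

static const struct devlink_param mlx5_devlink_params[] = {
	/* driverinit-only cmode, matching the documented mlx5 usage */
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, mlx5_devlink_enable_roce_validate),
};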
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -2477,6 +2477,9 @@ struct ib_device_ops {
 			     struct ifla_vf_info *ivf);
 	int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
 			    struct ifla_vf_stats *stats);
+	int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
+			   struct ifla_vf_guid *node_guid,
+			   struct ifla_vf_guid *port_guid);
 	int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
 			   int type);
 	struct ib_wq *(*create_wq)(struct ib_pd *pd,
@@ -3342,6 +3345,9 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
 		     struct ifla_vf_info *info);
 int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
 		    struct ifla_vf_stats *stats);
+int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+		   struct ifla_vf_guid *node_guid,
+		   struct ifla_vf_guid *port_guid);
 int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
 		   int type);
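The body of ib_get_vf_guid() is not part of this hunk; a plausible sketch of the core-layer dispatch, mirroring how the neighbouring ib_get_vf_stats()-style wrappers funnel through ib_device_ops:

/* Sketch: return -EOPNOTSUPP when the HCA driver lacks the callback. */
int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
		   struct ifla_vf_guid *node_guid,
		   struct ifla_vf_guid *port_guid)
{
	if (!device->ops.get_vf_guid)
		return -EOPNOTSUPP;

	return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
}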
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -2884,6 +2884,11 @@ static const struct devlink_param devlink_param_generic[] = {
 		.name = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_NAME,
 		.type = DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE,
 	},
+	{
+		.id = DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+		.name = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME,
+		.type = DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE,
+	},
 };
 
 static int devlink_param_generic_verify(const struct devlink_param *param)
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1204,6 +1204,8 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	struct ifla_vf_mac vf_mac;
 	struct ifla_vf_broadcast vf_broadcast;
 	struct ifla_vf_info ivi;
+	struct ifla_vf_guid node_guid;
+	struct ifla_vf_guid port_guid;
 
 	memset(&ivi, 0, sizeof(ivi));
 
@@ -1270,6 +1272,18 @@ static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
 	    nla_put(skb, IFLA_VF_TRUST,
 		    sizeof(vf_trust), &vf_trust))
 		goto nla_put_vf_failure;
+
+	memset(&node_guid, 0, sizeof(node_guid));
+	memset(&port_guid, 0, sizeof(port_guid));
+	if (dev->netdev_ops->ndo_get_vf_guid &&
+	    !dev->netdev_ops->ndo_get_vf_guid(dev, vfs_num, &node_guid,
+					      &port_guid)) {
+		if (nla_put(skb, IFLA_VF_IB_NODE_GUID, sizeof(node_guid),
+			    &node_guid) ||
+		    nla_put(skb, IFLA_VF_IB_PORT_GUID, sizeof(port_guid),
+			    &port_guid))
+			goto nla_put_vf_failure;
+	}
 	vfvlanlist = nla_nest_start_noflag(skb, IFLA_VF_VLAN_LIST);
 	if (!vfvlanlist)
 		goto nla_put_vf_failure;