Merge tag 'mlx5-updates-2020-09-30' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5-updates-2020-09-30

Updates and cleanups for the mlx5 driver:

1) From Ariel, Dan Carpenter and Gustavo, fixes to the previous
   mlx5 connection tracking series.

2) From Yevgeny, trivial cleanups for software steering

3) From Hamdan, support for flow source hints in software steering and
   the E-Switch

4) From Parav and Sunil, small and trivial E-Switch updates and
   cleanups in preparation for mlx5 sub-function support
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2020-10-01 12:24:52 -07:00
commit 87d5034d07
28 changed files with 371 additions and 355 deletions


@ -49,7 +49,8 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offlo
ecpf.o rdma.o
mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \
esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o
esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \
esw/devlink_port.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
mlx5_core-$(CONFIG_VXLAN) += lib/vxlan.o


@ -56,8 +56,8 @@ static void *mlx5_dma_zalloc_coherent_node(struct mlx5_core_dev *dev,
size_t size, dma_addr_t *dma_handle,
int node)
{
struct device *device = mlx5_core_dma_dev(dev);
struct mlx5_priv *priv = &dev->priv;
struct device *device = dev->device;
int original_node;
void *cpu_handle;
@ -111,7 +111,7 @@ EXPORT_SYMBOL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
{
dma_free_coherent(dev->device, buf->size, buf->frags->buf,
dma_free_coherent(mlx5_core_dma_dev(dev), buf->size, buf->frags->buf,
buf->frags->map);
kfree(buf->frags);
@ -140,7 +140,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
if (!frag->buf)
goto err_free_buf;
if (frag->map & ((1 << buf->page_shift) - 1)) {
dma_free_coherent(dev->device, frag_sz,
dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz,
buf->frags[i].buf, buf->frags[i].map);
mlx5_core_warn(dev, "unexpected map alignment: %pad, page_shift=%d\n",
&frag->map, buf->page_shift);
@ -153,7 +153,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
err_free_buf:
while (i--)
dma_free_coherent(dev->device, PAGE_SIZE, buf->frags[i].buf,
dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE, buf->frags[i].buf,
buf->frags[i].map);
kfree(buf->frags);
err_out:
@ -169,7 +169,7 @@ void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf)
for (i = 0; i < buf->npages; i++) {
int frag_sz = min_t(int, size, PAGE_SIZE);
dma_free_coherent(dev->device, frag_sz, buf->frags[i].buf,
dma_free_coherent(mlx5_core_dma_dev(dev), frag_sz, buf->frags[i].buf,
buf->frags[i].map);
size -= frag_sz;
}
@ -275,7 +275,7 @@ void mlx5_db_free(struct mlx5_core_dev *dev, struct mlx5_db *db)
__set_bit(db->index, db->u.pgdir->bitmap);
if (bitmap_full(db->u.pgdir->bitmap, db_per_page)) {
dma_free_coherent(dev->device, PAGE_SIZE,
dma_free_coherent(mlx5_core_dma_dev(dev), PAGE_SIZE,
db->u.pgdir->db_page, db->u.pgdir->db_dma);
list_del(&db->u.pgdir->list);
bitmap_free(db->u.pgdir->bitmap);


@ -1899,9 +1899,7 @@ static void create_msg_cache(struct mlx5_core_dev *dev)
static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
struct device *ddev = dev->device;
cmd->cmd_alloc_buf = dma_alloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE,
&cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
return -ENOMEM;
@ -1914,9 +1912,9 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
return 0;
}
dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
dma_free_coherent(mlx5_core_dma_dev(dev), MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
cmd->alloc_dma);
cmd->cmd_alloc_buf = dma_alloc_coherent(ddev,
cmd->cmd_alloc_buf = dma_alloc_coherent(mlx5_core_dma_dev(dev),
2 * MLX5_ADAPTER_PAGE_SIZE - 1,
&cmd->alloc_dma, GFP_KERNEL);
if (!cmd->cmd_alloc_buf)
@ -1930,9 +1928,7 @@ static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
struct device *ddev = dev->device;
dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
cmd->alloc_dma);
}
@ -1964,7 +1960,7 @@ int mlx5_cmd_init(struct mlx5_core_dev *dev)
if (!cmd->stats)
return -ENOMEM;
cmd->pool = dma_pool_create("mlx5_cmd", dev->device, size, align, 0);
cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
if (!cmd->pool) {
err = -ENOMEM;
goto dma_pool_err;


@ -124,7 +124,7 @@ static void mlx5_fw_tracer_ownership_release(struct mlx5_fw_tracer *tracer)
static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev = tracer->dev;
struct device *ddev = &dev->pdev->dev;
struct device *ddev;
dma_addr_t dma;
void *buff;
gfp_t gfp;
@ -142,6 +142,7 @@ static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
}
tracer->buff.log_buf = buff;
ddev = mlx5_core_dma_dev(dev);
dma = dma_map_single(ddev, buff, tracer->buff.size, DMA_FROM_DEVICE);
if (dma_mapping_error(ddev, dma)) {
mlx5_core_warn(dev, "FWTracer: Unable to map DMA: %d\n",
@ -162,11 +163,12 @@ static int mlx5_fw_tracer_create_log_buf(struct mlx5_fw_tracer *tracer)
static void mlx5_fw_tracer_destroy_log_buf(struct mlx5_fw_tracer *tracer)
{
struct mlx5_core_dev *dev = tracer->dev;
struct device *ddev = &dev->pdev->dev;
struct device *ddev;
if (!tracer->buff.log_buf)
return;
ddev = mlx5_core_dma_dev(dev);
dma_unmap_single(ddev, tracer->buff.dma, tracer->buff.size, DMA_FROM_DEVICE);
free_pages((unsigned long)tracer->buff.log_buf, get_order(tracer->buff.size));
}


@ -78,7 +78,7 @@ static int mlx5_rsc_dump_trigger(struct mlx5_core_dev *dev, struct mlx5_rsc_dump
struct page *page)
{
struct mlx5_rsc_dump *rsc_dump = dev->rsc_dump;
struct device *ddev = &dev->pdev->dev;
struct device *ddev = mlx5_core_dma_dev(dev);
u32 out_seq_num;
u32 in_seq_num;
dma_addr_t dma;


@ -739,6 +739,7 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
struct mlx5_core_dev *dev = ct_priv->dev;
struct mlx5_ct_entry *rev_entry;
__be16 tmp_port;
int ret;
/* get the reversed tuple */
tmp_port = rev_tuple.port.src;
@ -778,8 +779,9 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
shared_counter->counter = mlx5_fc_create(dev, true);
if (IS_ERR(shared_counter->counter)) {
ct_dbg("Failed to create counter for ct entry");
ret = PTR_ERR(shared_counter->counter);
kfree(shared_counter);
return ERR_PTR(PTR_ERR(shared_counter->counter));
return ERR_PTR(ret);
}
refcount_set(&shared_counter->refcount, 1);
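The fix above, in isolation: the old code freed shared_counter and then read
shared_counter->counter again inside ERR_PTR(), a use-after-free on the error
path. A minimal sketch of the corrected ordering, using the same names:

    ret = PTR_ERR(shared_counter->counter); /* capture the error code first */
    kfree(shared_counter);                  /* then free the wrapper */
    return ERR_PTR(ret);                    /* freed memory is never re-read */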


@ -9,7 +9,7 @@
static int mlx5e_xsk_map_pool(struct mlx5e_priv *priv,
struct xsk_buff_pool *pool)
{
struct device *dev = priv->mdev->device;
struct device *dev = mlx5_core_dma_dev(priv->mdev);
return xsk_pool_dma_map(pool, dev, 0);
}


@ -253,7 +253,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq,
goto err_out;
}
pdev = sq->channel->priv->mdev->device;
pdev = mlx5_core_dma_dev(sq->channel->priv->mdev);
buf->dma_addr = dma_map_single(pdev, &buf->progress,
PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) {
@ -390,7 +390,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi,
priv_rx = buf->priv_rx;
resync = &priv_rx->resync;
dev = resync->priv->mdev->device;
dev = mlx5_core_dma_dev(resync->priv->mdev);
if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags)))
goto out;


@ -1943,7 +1943,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
c->tstamp = &priv->tstamp;
c->ix = ix;
c->cpu = cpu;
c->pdev = priv->mdev->device;
c->pdev = mlx5_core_dma_dev(priv->mdev);
c->netdev = priv->netdev;
c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
c->num_tc = params->num_tc;
@ -2131,7 +2131,7 @@ void mlx5e_build_rq_param(struct mlx5e_priv *priv,
MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);
param->wq.buf_numa_node = dev_to_node(mdev->device);
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
mlx5e_build_rx_cq_param(priv, params, xsk, &param->cqp);
}
@ -2147,7 +2147,7 @@ static void mlx5e_build_drop_rq_param(struct mlx5e_priv *priv,
mlx5e_get_rqwq_log_stride(MLX5_WQ_TYPE_CYCLIC, 1));
MLX5_SET(rqc, rqc, counter_set_id, priv->drop_rq_q_counter);
param->wq.buf_numa_node = dev_to_node(mdev->device);
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
}
void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
@ -2159,7 +2159,7 @@ void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
param->wq.buf_numa_node = dev_to_node(priv->mdev->device);
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(priv->mdev));
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
@ -3197,8 +3197,8 @@ static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
struct mlx5e_cq *cq,
struct mlx5e_cq_param *param)
{
param->wq.buf_numa_node = dev_to_node(mdev->device);
param->wq.db_numa_node = dev_to_node(mdev->device);
param->wq.buf_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
param->wq.db_numa_node = dev_to_node(mlx5_core_dma_dev(mdev));
return mlx5e_alloc_cq_common(mdev, param, cq);
}


@ -374,19 +374,6 @@ static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};
static void mlx5e_rep_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid)
{
struct mlx5e_priv *priv;
u64 parent_id;
priv = netdev_priv(dev);
parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
ppid->id_len = sizeof(parent_id);
memcpy(ppid->id, &parent_id, sizeof(parent_id));
}
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
struct mlx5_eswitch_rep *rep)
{
@ -611,12 +598,13 @@ static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan
return 0;
}
static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *dev)
static struct devlink_port *mlx5e_rep_get_devlink_port(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_core_dev *dev = priv->mdev;
return &rpriv->dl_port;
return mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
}
static int mlx5e_rep_change_carrier(struct net_device *dev, bool new_carrier)
@ -1206,70 +1194,13 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
.stats_grps_num = mlx5e_ul_rep_stats_grps_num,
};
static bool
is_devlink_port_supported(const struct mlx5_core_dev *dev,
const struct mlx5e_rep_priv *rpriv)
{
return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
rpriv->rep->vport == MLX5_VPORT_PF ||
mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
}
static int register_devlink_port(struct mlx5_core_dev *dev,
struct mlx5e_rep_priv *rpriv)
{
struct mlx5_esw_offload *offloads = &dev->priv.eswitch->offloads;
struct devlink *devlink = priv_to_devlink(dev);
struct mlx5_eswitch_rep *rep = rpriv->rep;
struct devlink_port_attrs attrs = {};
struct netdev_phys_item_id ppid = {};
unsigned int dl_port_index = 0;
u32 controller_num = 0;
bool external;
u16 pfnum;
if (!is_devlink_port_supported(dev, rpriv))
return 0;
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = offloads->host_number + 1;
mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, rep->vport);
pfnum = PCI_FUNC(dev->pdev->devfn);
if (rep->vport == MLX5_VPORT_UPLINK) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pfnum;
memcpy(attrs.switch_id.id, &ppid.id[0], ppid.id_len);
attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_set(&rpriv->dl_port, &attrs);
} else if (rep->vport == MLX5_VPORT_PF) {
memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(&rpriv->dl_port, controller_num,
pfnum, external);
} else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport)) {
memcpy(rpriv->dl_port.attrs.switch_id.id, &ppid.id[0], ppid.id_len);
rpriv->dl_port.attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(&rpriv->dl_port, controller_num,
pfnum, rep->vport - 1, external);
}
return devlink_port_register(devlink, &rpriv->dl_port, dl_port_index);
}
static void unregister_devlink_port(struct mlx5_core_dev *dev,
struct mlx5e_rep_priv *rpriv)
{
if (is_devlink_port_supported(dev, rpriv))
devlink_port_unregister(&rpriv->dl_port);
}
/* e-Switch vport representors */
static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
const struct mlx5e_profile *profile;
struct mlx5e_rep_priv *rpriv;
struct devlink_port *dl_port;
struct net_device *netdev;
int nch, err;
@ -1319,28 +1250,19 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
err = register_devlink_port(dev, rpriv);
if (err) {
netdev_warn(netdev, "Failed to register devlink port %d\n",
rep->vport);
goto err_neigh_cleanup;
}
err = register_netdev(netdev);
if (err) {
netdev_warn(netdev,
"Failed to register representor netdev for vport %d\n",
rep->vport);
goto err_devlink_cleanup;
goto err_neigh_cleanup;
}
if (is_devlink_port_supported(dev, rpriv))
devlink_port_type_eth_set(&rpriv->dl_port, netdev);
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
if (dl_port)
devlink_port_type_eth_set(dl_port, netdev);
return 0;
err_devlink_cleanup:
unregister_devlink_port(dev, rpriv);
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
@ -1364,12 +1286,13 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
struct net_device *netdev = rpriv->netdev;
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *dev = priv->mdev;
struct devlink_port *dl_port;
void *ppriv = priv->ppriv;
if (is_devlink_port_supported(dev, rpriv))
devlink_port_type_clear(&rpriv->dl_port);
dl_port = mlx5_esw_offloads_devlink_port(dev->priv.eswitch, rpriv->rep->vport);
if (dl_port)
devlink_port_type_clear(dl_port);
unregister_netdev(netdev);
unregister_devlink_port(dev, rpriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
if (rep->vport == MLX5_VPORT_UPLINK)


@ -101,7 +101,6 @@ struct mlx5e_rep_priv {
struct list_head vport_sqs_list;
struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */
struct rtnl_link_stats64 prev_vf_vport_stats;
struct devlink_port dl_port;
};
static inline


@ -1238,8 +1238,10 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
struct mlx5_flow_attr *slow_attr;
slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
if (!slow_attr)
mlx5_core_warn(flow->priv->mdev, "Unable to unoffload slow path rule\n");
if (!slow_attr) {
mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
return;
}
memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
@ -4534,20 +4536,22 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
struct mlx5e_tc_flow_parse_attr *parse_attr;
struct mlx5_flow_attr *attr;
struct mlx5e_tc_flow *flow;
int out_index, err;
int err = -ENOMEM;
int out_index;
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
if (!parse_attr || !flow)
goto err_free;
flow->flags = flow_flags;
flow->cookie = f->cookie;
flow->priv = priv;
attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
if (!parse_attr || !flow || !attr) {
err = -ENOMEM;
if (!attr)
goto err_free;
}
flow->attr = attr;
for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)


@ -148,6 +148,11 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport)
esw_acl_egress_vlan_grp_destroy(vport);
}
static bool esw_acl_egress_needed(const struct mlx5_eswitch *esw, u16 vport_num)
{
return mlx5_eswitch_is_vf_vport(esw, vport_num);
}
int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
int table_size = 0;
@ -157,6 +162,9 @@ int esw_acl_egress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport
!MLX5_CAP_GEN(esw->dev, prio_tag_required))
return 0;
if (!esw_acl_egress_needed(esw, vport->vport))
return 0;
esw_acl_egress_ofld_rules_destroy(vport);
if (mlx5_esw_acl_egress_fwd2vport_supported(esw))


@ -0,0 +1,124 @@
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */
#include <linux/mlx5/driver.h>
#include "eswitch.h"
static void
mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
u64 parent_id;
parent_id = mlx5_query_nic_system_image_guid(dev);
ppid->id_len = sizeof(parent_id);
memcpy(ppid->id, &parent_id, sizeof(parent_id));
}
static bool
mlx5_esw_devlink_port_supported(const struct mlx5_eswitch *esw, u16 vport_num)
{
return vport_num == MLX5_VPORT_UPLINK ||
(mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
mlx5_eswitch_is_vf_vport(esw, vport_num);
}
static struct devlink_port *mlx5_esw_dl_port_alloc(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_core_dev *dev = esw->dev;
struct devlink_port_attrs attrs = {};
struct netdev_phys_item_id ppid = {};
struct devlink_port *dl_port;
u32 controller_num = 0;
bool external;
u16 pfnum;
dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
if (!dl_port)
return NULL;
mlx5_esw_get_port_parent_id(dev, &ppid);
pfnum = PCI_FUNC(dev->pdev->devfn);
external = mlx5_core_is_ecpf_esw_manager(dev);
if (external)
controller_num = dev->priv.eswitch->offloads.host_number + 1;
if (vport_num == MLX5_VPORT_UPLINK) {
attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
attrs.phys.port_number = pfnum;
memcpy(attrs.switch_id.id, ppid.id, ppid.id_len);
attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_set(dl_port, &attrs);
} else if (vport_num == MLX5_VPORT_PF) {
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
} else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
dl_port->attrs.switch_id.id_len = ppid.id_len;
devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
vport_num - 1, external);
}
return dl_port;
}
static void mlx5_esw_dl_port_free(struct devlink_port *dl_port)
{
kfree(dl_port);
}
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_core_dev *dev = esw->dev;
struct devlink_port *dl_port;
unsigned int dl_port_index;
struct mlx5_vport *vport;
struct devlink *devlink;
int err;
if (!mlx5_esw_devlink_port_supported(esw, vport_num))
return 0;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return PTR_ERR(vport);
dl_port = mlx5_esw_dl_port_alloc(esw, vport_num);
if (!dl_port)
return -ENOMEM;
devlink = priv_to_devlink(dev);
dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
err = devlink_port_register(devlink, dl_port, dl_port_index);
if (err)
goto reg_err;
vport->dl_port = dl_port;
return 0;
reg_err:
mlx5_esw_dl_port_free(dl_port);
return err;
}
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport;
if (!mlx5_esw_devlink_port_supported(esw, vport_num))
return;
vport = mlx5_eswitch_get_vport(esw, vport_num);
if (IS_ERR(vport))
return;
devlink_port_unregister(vport->dl_port);
mlx5_esw_dl_port_free(vport->dl_port);
vport->dl_port = NULL;
}
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_vport *vport;
vport = mlx5_eswitch_get_vport(esw, vport_num);
return vport->dl_port;
}
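Taken together with the esw_offloads_load_rep()/esw_offloads_unload_rep()
hunks further below, the lifecycle of the new per-vport devlink port is
roughly the following (a sketch, error unwinding elided):

    esw_offloads_load_rep()
        mlx5_esw_offloads_devlink_port_register()   /* devlink port first */
        mlx5_esw_offloads_rep_load()                /* then the rep */

    esw_offloads_unload_rep()
        mlx5_esw_offloads_rep_unload()              /* reverse order */
        mlx5_esw_offloads_devlink_port_unregister()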


@ -156,6 +156,7 @@ struct mlx5_vport {
bool enabled;
enum mlx5_eswitch_vport_event enabled_events;
struct devlink_port *dl_port;
};
struct mlx5_eswitch_fdb {
@ -663,6 +664,9 @@ int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
enum mlx5_eswitch_vport_event enabled_events);
void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs);
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_num);
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vport_num);
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num);
#else /* CONFIG_MLX5_ESWITCH */
/* eswitch API stubs */
static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; }


@ -247,8 +247,11 @@ mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
struct mlx5_esw_flow_attr *attr)
{
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
attr && attr->in_rep)
spec->flow_context.flow_source =
attr->in_rep->vport == MLX5_VPORT_UPLINK ?
MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
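Spelled out as plain if/else, the new hint above amounts to this (same
identifiers as the hunk; previously only the uplink case set a flow source):

    if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
        spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
    else
        spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;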
static void
@ -1820,15 +1823,12 @@ static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
static int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
int err;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
@ -1847,19 +1847,46 @@ int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
return err;
}
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
static void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
{
struct mlx5_eswitch_rep *rep;
int rep_type;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
rep = mlx5_eswitch_get_rep(esw, vport_num);
for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
__esw_offloads_unload_rep(esw, rep, rep_type);
}
int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
int err;
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return 0;
err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
if (err)
return err;
err = mlx5_esw_offloads_rep_load(esw, vport_num);
if (err)
goto load_err;
return err;
load_err:
mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
return err;
}
void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
{
if (esw->mode != MLX5_ESWITCH_OFFLOADS)
return;
mlx5_esw_offloads_rep_unload(esw, vport_num);
mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
}
#define ESW_OFFLOADS_DEVCOM_PAIR (0)
#define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
@ -2019,31 +2046,31 @@ esw_check_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
{
u32 num_vports = GENMASK(ESW_VPORT_BITS - 1, 0) - 1;
u32 vhca_id_mask = GENMASK(ESW_VHCA_ID_BITS - 1, 0);
u32 vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id);
u32 start;
u32 end;
u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 1;
u32 pf_num;
int id;
/* Make sure the vhca_id fits the ESW_VHCA_ID_BITS */
WARN_ON_ONCE(vhca_id >= BIT(ESW_VHCA_ID_BITS));
/* Only 4 bits of pf_num */
pf_num = PCI_FUNC(esw->dev->pdev->devfn);
if (pf_num > max_pf_num)
return 0;
/* Trim vhca_id to ESW_VHCA_ID_BITS */
vhca_id &= vhca_id_mask;
start = (vhca_id << ESW_VPORT_BITS);
end = start + num_vports;
if (!vhca_id)
start += 1; /* zero is reserved/invalid metadata */
id = ida_alloc_range(&esw->offloads.vport_metadata_ida, start, end, GFP_KERNEL);
return (id < 0) ? 0 : id;
/* Metadata is 4 bits of PFNUM and 12 bits of unique id */
/* Use only non-zero vport_id (1-4095) for all PF's */
id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
if (id < 0)
return 0;
id = (pf_num << ESW_VPORT_BITS) | id;
return id;
}
void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
{
ida_free(&esw->offloads.vport_metadata_ida, metadata);
u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
/* Metadata contains only 12 bits of actual ida id */
ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
}
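A worked sketch of the new source-port metadata layout (matching the
ESW_PFNUM_BITS/ESW_VPORT_BITS definitions changed in
include/linux/mlx5/eswitch.h at the end of this series): 4 bits of PF number
above 12 bits of non-zero IDA-allocated vport id, which is why the free path
masks the value before calling ida_free():

    /* compose: pf_num (0-15) in bits 15:12, ida id (1-4095) in bits 11:0 */
    metadata = (pf_num << ESW_VPORT_BITS) | id;

    /* decompose for ida_free(): only the low 12 bits came from the IDA */
    id = metadata & ((1 << ESW_VPORT_BITS) - 1);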
static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
@ -2108,11 +2135,9 @@ esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
if (err)
return err;
if (mlx5_eswitch_is_vf_vport(esw, vport->vport)) {
err = esw_acl_egress_ofld_setup(esw, vport);
if (err)
goto egress_err;
}
err = esw_acl_egress_ofld_setup(esw, vport);
if (err)
goto egress_err;
return 0;


@ -54,7 +54,7 @@ static int mlx5_fpga_conn_map_buf(struct mlx5_fpga_conn *conn,
if (unlikely(!buf->sg[0].data))
goto out;
dma_device = &conn->fdev->mdev->pdev->dev;
dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
buf->sg[0].dma_addr = dma_map_single(dma_device, buf->sg[0].data,
buf->sg[0].size, buf->dma_dir);
err = dma_mapping_error(dma_device, buf->sg[0].dma_addr);
@ -86,7 +86,7 @@ static void mlx5_fpga_conn_unmap_buf(struct mlx5_fpga_conn *conn,
{
struct device *dma_device;
dma_device = &conn->fdev->mdev->pdev->dev;
dma_device = mlx5_core_dma_dev(conn->fdev->mdev);
if (buf->sg[1].data)
dma_unmap_single(dma_device, buf->sg[1].dma_addr,
buf->sg[1].size, buf->dma_dir);


@ -739,7 +739,7 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
pci_set_drvdata(dev->pdev, dev);
dev->bar_addr = pci_resource_start(pdev, 0);
priv->numa_node = dev_to_node(&dev->pdev->dev);
priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
err = mlx5_pci_enable_device(dev);
if (err) {


@ -100,6 +100,11 @@ do { \
__func__, __LINE__, current->pid, \
##__VA_ARGS__)
static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev)
{
return &dev->pdev->dev;
}
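This accessor is what all the call-site conversions in this series go
through: DMA-facing code asks the core for the struct device instead of
open-coding dev->device or &dev->pdev->dev. A sketch of the call pattern;
the sub-function remark is an assumption about where this preparation is
heading, not code in this series:

    /* before: dma_alloc_coherent(dev->device, size, &dma, GFP_KERNEL); */
    buf = dma_alloc_coherent(mlx5_core_dma_dev(dev), size, &dma, GFP_KERNEL);

    /* a future sub-function device could return its parent PF's PCI device
     * from mlx5_core_dma_dev() without touching any of these callers */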
enum {
MLX5_CMD_DATA, /* print command payload only */
MLX5_CMD_TIME, /* print command execution time */


@ -238,7 +238,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp,
rb_erase(&fwp->rb_node, root);
if (in_free_list)
list_del(&fwp->list);
dma_unmap_page(dev->device, fwp->addr & MLX5_U64_4K_PAGE_MASK,
dma_unmap_page(mlx5_core_dma_dev(dev), fwp->addr & MLX5_U64_4K_PAGE_MASK,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(fwp->page);
kfree(fwp);
@ -265,7 +265,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id)
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
struct device *device = dev->device;
struct device *device = mlx5_core_dma_dev(dev);
int nid = dev_to_node(device);
struct page *page;
u64 zero_addr = 1;


@ -203,7 +203,6 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn;
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_param mask = {};
struct mlx5dr_match_misc3 *misc3;
struct mlx5dr_ste_build *sb;
bool inner, rx;
int idx = 0;
@ -252,18 +251,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_gvmi_or_qpn_set(&mask.misc) &&
(dmn->type == MLX5DR_DOMAIN_TYPE_FDB ||
dmn->type == MLX5DR_DOMAIN_TYPE_NIC_RX)) {
ret = mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
dmn, inner, rx);
if (ret)
return ret;
mlx5dr_ste_build_src_gvmi_qpn(&sb[idx++], &mask,
dmn, inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer) &&
dr_mask_is_dmac_set(&mask.outer)) {
ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
inner, rx);
if (ret)
return ret;
mlx5dr_ste_build_eth_l2_src_des(&sb[idx++], &mask,
inner, rx);
}
if (dr_mask_is_smac_set(&mask.outer))
@ -313,8 +308,7 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
mlx5dr_ste_build_flex_parser_0(&sb[idx++], &mask,
inner, rx);
misc3 = &mask.misc3;
if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(misc3) &&
if ((DR_MASK_IS_FLEX_PARSER_ICMPV4_SET(&mask.misc3) &&
mlx5dr_matcher_supp_flex_parser_icmp_v4(&dmn->info.caps)) ||
(dr_mask_is_flex_parser_icmpv6_set(&mask.misc3) &&
mlx5dr_matcher_supp_flex_parser_icmp_v6(&dmn->info.caps))) {
@ -340,10 +334,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher,
if (dr_mask_is_smac_set(&mask.inner) &&
dr_mask_is_dmac_set(&mask.inner)) {
ret = mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
&mask, inner, rx);
if (ret)
return ret;
mlx5dr_ste_build_eth_l2_src_des(&sb[idx++],
&mask, inner, rx);
}
if (dr_mask_is_smac_set(&mask.inner))


@ -242,7 +242,7 @@ dr_rule_rehash_copy_ste(struct mlx5dr_matcher *matcher,
new_idx = mlx5dr_ste_calc_hash_index(hw_ste, new_htbl);
new_ste = &new_htbl->ste_arr[new_idx];
if (mlx5dr_ste_not_used_ste(new_ste)) {
if (mlx5dr_ste_is_not_used(new_ste)) {
mlx5dr_htbl_get(new_htbl);
list_add_tail(&new_ste->miss_list_node,
mlx5dr_ste_get_miss_list(new_ste));
@ -335,7 +335,7 @@ static int dr_rule_rehash_copy_htbl(struct mlx5dr_matcher *matcher,
for (i = 0; i < cur_entries; i++) {
cur_ste = &cur_htbl->ste_arr[i];
if (mlx5dr_ste_not_used_ste(cur_ste)) /* Empty, nothing to copy */
if (mlx5dr_ste_is_not_used(cur_ste)) /* Empty, nothing to copy */
continue;
err = dr_rule_rehash_copy_miss_list(matcher,
@ -791,7 +791,7 @@ dr_rule_handle_ste_branch(struct mlx5dr_rule *rule,
miss_list = &cur_htbl->chunk->miss_list[index];
ste = &cur_htbl->ste_arr[index];
if (mlx5dr_ste_not_used_ste(ste)) {
if (mlx5dr_ste_is_not_used(ste)) {
if (dr_rule_handle_empty_entry(matcher, nic_matcher, cur_htbl,
ste, ste_location,
hw_ste, miss_list,
@ -985,31 +985,28 @@ static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec)
static bool dr_rule_skip(enum mlx5dr_domain_type domain,
enum mlx5dr_ste_entry_type ste_type,
struct mlx5dr_match_param *mask,
struct mlx5dr_match_param *value)
struct mlx5dr_match_param *value,
u32 flow_source)
{
bool rx = ste_type == MLX5DR_STE_TYPE_RX;
if (domain != MLX5DR_DOMAIN_TYPE_FDB)
return false;
if (mask->misc.source_port) {
if (ste_type == MLX5DR_STE_TYPE_RX)
if (value->misc.source_port != WIRE_PORT)
return true;
if (rx && value->misc.source_port != WIRE_PORT)
return true;
if (ste_type == MLX5DR_STE_TYPE_TX)
if (value->misc.source_port == WIRE_PORT)
return true;
if (!rx && value->misc.source_port == WIRE_PORT)
return true;
}
/* Metadata C can be used to describe the source vport */
if (mask->misc2.metadata_reg_c_0) {
if (ste_type == MLX5DR_STE_TYPE_RX)
if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) != WIRE_PORT)
return true;
if (rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT)
return true;
if (!rx && flow_source == MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK)
return true;
if (ste_type == MLX5DR_STE_TYPE_TX)
if ((value->misc2.metadata_reg_c_0 & WIRE_PORT) == WIRE_PORT)
return true;
}
return false;
}
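What the rewritten dr_rule_skip() decides with the new flow_source argument,
summarized (derived from the code above):

    rule copy           flow_source == LOCAL_VPORT    flow_source == UPLINK
    RX (from wire)      skip - cannot match           install
    TX (from vport)     install                       skip - cannot match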
@ -1038,7 +1035,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule,
INIT_LIST_HEAD(&nic_rule->rule_members_list);
if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param))
if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param,
rule->flow_source))
return 0;
hw_ste_arr = kzalloc(DR_RULE_MAX_STE_CHAIN * DR_STE_SIZE, GFP_KERNEL);
@ -1173,7 +1171,8 @@ static struct mlx5dr_rule *
dr_rule_create_rule(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
struct mlx5dr_action *actions[])
struct mlx5dr_action *actions[],
u32 flow_source)
{
struct mlx5dr_domain *dmn = matcher->tbl->dmn;
struct mlx5dr_match_param param = {};
@ -1188,6 +1187,7 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
return NULL;
rule->matcher = matcher;
rule->flow_source = flow_source;
INIT_LIST_HEAD(&rule->rule_actions_list);
ret = dr_rule_add_action_members(rule, num_actions, actions);
@ -1232,13 +1232,14 @@ dr_rule_create_rule(struct mlx5dr_matcher *matcher,
struct mlx5dr_rule *mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
struct mlx5dr_action *actions[])
struct mlx5dr_action *actions[],
u32 flow_source)
{
struct mlx5dr_rule *rule;
refcount_inc(&matcher->refcount);
rule = dr_rule_create_rule(matcher, value, num_actions, actions);
rule = dr_rule_create_rule(matcher, value, num_actions, actions, flow_source);
if (!rule)
refcount_dec(&matcher->refcount);


@ -466,10 +466,10 @@ int mlx5dr_send_postsend_htbl(struct mlx5dr_domain *dmn,
* need to add the bit_mask
*/
for (j = 0; j < num_stes_per_iter; j++) {
u8 *hw_ste = htbl->ste_arr[ste_index + j].hw_ste;
struct mlx5dr_ste *ste = &htbl->ste_arr[ste_index + j];
u32 ste_off = j * DR_STE_SIZE;
if (mlx5dr_ste_is_not_valid_entry(hw_ste)) {
if (mlx5dr_ste_is_not_used(ste)) {
memcpy(data + ste_off,
formatted_ste, DR_STE_SIZE);
} else {
@ -831,7 +831,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
if (!mr)
return NULL;
dma_device = &mdev->pdev->dev;
dma_device = mlx5_core_dma_dev(mdev);
dma_addr = dma_map_single(dma_device, buf, size,
DMA_BIDIRECTIONAL);
err = dma_mapping_error(dma_device, dma_addr);
@ -860,7 +860,7 @@ static struct mlx5dr_mr *dr_reg_mr(struct mlx5_core_dev *mdev,
static void dr_dereg_mr(struct mlx5_core_dev *mdev, struct mlx5dr_mr *mr)
{
mlx5_core_destroy_mkey(mdev, &mr->mkey);
dma_unmap_single(&mdev->pdev->dev, mr->dma_addr, mr->size,
dma_unmap_single(mlx5_core_dma_dev(mdev), mr->dma_addr, mr->size,
DMA_BIDIRECTIONAL);
kfree(mr);
}


@ -155,6 +155,13 @@ static u16 dr_ste_conv_bit_to_byte_mask(u8 *bit_mask)
return byte_mask;
}
static u8 *mlx5dr_ste_get_tag(u8 *hw_ste_p)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
return hw_ste->tag;
}
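This helper lets mlx5dr_ste_build_ste_arr() hand each builder the tag portion
of the STE directly (see the hunk further down), which is what allows every
*_tag callback in this file to drop its dr_hw_ste_format cast:

    ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));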
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
@ -549,25 +556,6 @@ void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr)
dr_ste_set_always_miss((struct dr_hw_ste_format *)ste->hw_ste);
}
/* The assumption here is that we don't update the ste->hw_ste if it is not
* used ste, so it will be all zero, checking the next_lu_type.
*/
bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)p_hw_ste;
if (MLX5_GET(ste_general, hw_ste, next_lu_type) ==
MLX5DR_STE_LU_TYPE_NOP)
return true;
return false;
}
bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
{
return !ste->refcount;
}
/* Init one ste as a pattern for ste data array */
void mlx5dr_ste_set_formatted_ste(u16 gvmi,
struct mlx5dr_domain_rx_tx *nic_dmn,
@ -728,7 +716,14 @@ int mlx5dr_ste_build_pre_check(struct mlx5dr_domain *dmn,
{
if (!value && (match_criteria & DR_MATCHER_CRITERIA_MISC)) {
if (mask->misc.source_port && mask->misc.source_port != 0xffff) {
mlx5dr_err(dmn, "Partial mask source_port is not supported\n");
mlx5dr_err(dmn,
"Partial mask source_port is not supported\n");
return -EINVAL;
}
if (mask->misc.source_eswitch_owner_vhca_id &&
mask->misc.source_eswitch_owner_vhca_id != 0xffff) {
mlx5dr_err(dmn,
"Partial mask source_eswitch_owner_vhca_id is not supported\n");
return -EINVAL;
}
}
@ -760,7 +755,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask);
ret = sb->ste_build_tag_func(value, sb, ste_arr);
ret = sb->ste_build_tag_func(value, sb, mlx5dr_ste_get_tag(ste_arr));
if (ret)
return ret;
@ -778,8 +773,8 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
return 0;
}
static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
static void dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value,
bool inner, u8 *bit_mask)
{
struct mlx5dr_match_spec *mask = inner ? &value->inner : &value->outer;
@ -807,13 +802,6 @@ static int dr_ste_build_eth_l2_src_des_bit_mask(struct mlx5dr_match_param *value
MLX5_SET(ste_eth_l2_src_dst, bit_mask, first_vlan_qualifier, -1);
mask->svlan_tag = 0;
}
if (mask->cvlan_tag || mask->svlan_tag) {
pr_info("Invalid c/svlan mask configuration\n");
return -EINVAL;
}
return 0;
}
static void dr_ste_copy_mask_misc(char *mask, struct mlx5dr_match_misc *spec)
@ -1059,11 +1047,9 @@ void mlx5dr_ste_copy_param(u8 match_criteria,
static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_src_dst, tag, dmac_15_0, spec, dmac_15_0);
@ -1104,23 +1090,17 @@ static int dr_ste_build_eth_l2_src_des_tag(struct mlx5dr_match_param *value,
return 0;
}
int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx)
{
int ret;
ret = dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
if (ret)
return ret;
dr_ste_build_eth_l2_src_des_bit_mask(mask, inner, sb->bit_mask);
sb->rx = rx;
sb->inner = inner;
sb->lu_type = DR_STE_CALC_LU_TYPE(ETHL2_SRC_DST, rx, inner);
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_eth_l2_src_des_tag;
return 0;
}
static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *value,
@ -1136,11 +1116,9 @@ static void dr_ste_build_eth_l3_ipv6_dst_bit_mask(struct mlx5dr_match_param *val
static int dr_ste_build_eth_l3_ipv6_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_127_96, spec, dst_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_dst, tag, dst_ip_95_64, spec, dst_ip_95_64);
@ -1176,11 +1154,9 @@ static void dr_ste_build_eth_l3_ipv6_src_bit_mask(struct mlx5dr_match_param *val
static int dr_ste_build_eth_l3_ipv6_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_127_96, spec, src_ip_127_96);
DR_STE_SET_TAG(eth_l3_ipv6_src, tag, src_ip_95_64, spec, src_ip_95_64);
@ -1238,11 +1214,9 @@ static void dr_ste_build_eth_l3_ipv4_5_tuple_bit_mask(struct mlx5dr_match_param
static int dr_ste_build_eth_l3_ipv4_5_tuple_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, destination_address, spec, dst_ip_31_0);
DR_STE_SET_TAG(eth_l3_ipv4_5_tuple, tag, source_address, spec, src_ip_31_0);
@ -1328,12 +1302,10 @@ dr_ste_build_eth_l2_src_or_dst_bit_mask(struct mlx5dr_match_param *value,
}
static int dr_ste_build_eth_l2_src_or_dst_tag(struct mlx5dr_match_param *value,
bool inner, u8 *hw_ste_p)
bool inner, u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = inner ? &value->inner : &value->outer;
struct mlx5dr_match_misc *misc_spec = &value->misc;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_src, tag, first_vlan_id, spec, first_vid);
DR_STE_SET_TAG(eth_l2_src, tag, first_cfi, spec, first_cfi);
@ -1403,16 +1375,14 @@ static void dr_ste_build_eth_l2_src_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l2_src_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_src, tag, smac_47_16, spec, smac_47_16);
DR_STE_SET_TAG(eth_l2_src, tag, smac_15_0, spec, smac_15_0);
return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
void mlx5dr_ste_build_eth_l2_src(struct mlx5dr_ste_build *sb,
@ -1440,16 +1410,14 @@ static void dr_ste_build_eth_l2_dst_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l2_dst_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_dst, tag, dmac_15_0, spec, dmac_15_0);
return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, hw_ste_p);
return dr_ste_build_eth_l2_src_or_dst_tag(value, sb->inner, tag);
}
void mlx5dr_ste_build_eth_l2_dst(struct mlx5dr_ste_build *sb,
@ -1495,12 +1463,10 @@ static void dr_ste_build_eth_l2_tnl_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l2_tnl_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_47_16, spec, dmac_47_16);
DR_STE_SET_TAG(eth_l2_tnl, tag, dmac_15_0, spec, dmac_15_0);
@ -1561,11 +1527,9 @@ static void dr_ste_build_eth_l3_ipv4_misc_bit_mask(struct mlx5dr_match_param *va
static int dr_ste_build_eth_l3_ipv4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l3_ipv4_misc, tag, time_to_live, spec, ttl_hoplimit);
@ -1608,11 +1572,9 @@ static void dr_ste_build_ipv6_l3_l4_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_ipv6_l3_l4_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct mlx5dr_match_spec *spec = sb->inner ? &value->inner : &value->outer;
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport);
DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport);
@ -1647,7 +1609,7 @@ void mlx5dr_ste_build_ipv6_l3_l4(struct mlx5dr_ste_build *sb,
static int dr_ste_build_empty_always_hit_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
return 0;
}
@ -1673,11 +1635,9 @@ static void dr_ste_build_mpls_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_mpls_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc2_mask = &value->misc2;
u8 *tag = hw_ste->tag;
if (sb->inner)
DR_STE_SET_MPLS_TAG(mpls, misc2_mask, inner, tag);
@ -1716,11 +1676,9 @@ static void dr_ste_build_gre_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_gre_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(gre, tag, gre_protocol, misc, gre_protocol);
@ -1781,11 +1739,9 @@ static void dr_ste_build_flex_parser_0_bit_mask(struct mlx5dr_match_param *value
static int dr_ste_build_flex_parser_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
u8 *tag = hw_ste->tag;
if (DR_STE_IS_OUTER_MPLS_OVER_GRE_SET(misc_2_mask)) {
DR_STE_SET_TAG(flex_parser_0, tag, parser_3_label,
@ -1903,11 +1859,9 @@ static int dr_ste_build_flex_parser_1_bit_mask(struct mlx5dr_match_param *mask,
static int dr_ste_build_flex_parser_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc_3 = &value->misc3;
u8 *tag = hw_ste->tag;
u32 icmp_header_data;
int dw0_location;
int dw1_location;
@ -2007,11 +1961,9 @@ static void dr_ste_build_general_purpose_bit_mask(struct mlx5dr_match_param *val
static int dr_ste_build_general_purpose_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc_2_mask = &value->misc2;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(general_purpose, tag, general_purpose_lookup_field,
misc_2_mask, metadata_reg_a);
@ -2052,11 +2004,9 @@ static void dr_ste_build_eth_l4_misc_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_eth_l4_misc_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
u8 *tag = hw_ste->tag;
if (sb->inner) {
DR_STE_SET_TAG(eth_l4_misc, tag, seq_num, misc3, inner_tcp_seq_num);
@ -2102,11 +2052,9 @@ dr_ste_build_flex_parser_tnl_vxlan_gpe_bit_mask(struct mlx5dr_match_param *value
static int
dr_ste_build_flex_parser_tnl_vxlan_gpe_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc3 *misc3 = &value->misc3;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(flex_parser_tnl_vxlan_gpe, tag,
outer_vxlan_gpe_flags, misc3,
@ -2158,11 +2106,9 @@ dr_ste_build_flex_parser_tnl_geneve_bit_mask(struct mlx5dr_match_param *value,
static int
dr_ste_build_flex_parser_tnl_geneve_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(flex_parser_tnl_geneve, tag,
geneve_protocol_type, misc, geneve_protocol_type);
@ -2205,11 +2151,9 @@ static void dr_ste_build_register_0_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_register_0_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(register_0, tag, register_0_h, misc2, metadata_reg_c_0);
DR_STE_SET_TAG(register_0, tag, register_0_l, misc2, metadata_reg_c_1);
@ -2249,11 +2193,9 @@ static void dr_ste_build_register_1_bit_mask(struct mlx5dr_match_param *value,
static int dr_ste_build_register_1_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc2 *misc2 = &value->misc2;
u8 *tag = hw_ste->tag;
DR_STE_SET_TAG(register_1, tag, register_2_h, misc2, metadata_reg_c_4);
DR_STE_SET_TAG(register_1, tag, register_2_l, misc2, metadata_reg_c_5);
@ -2276,38 +2218,25 @@ void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
sb->ste_build_tag_func = &dr_ste_build_register_1_tag;
}
static int dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
u8 *bit_mask)
static void dr_ste_build_src_gvmi_qpn_bit_mask(struct mlx5dr_match_param *value,
u8 *bit_mask)
{
struct mlx5dr_match_misc *misc_mask = &value->misc;
/* Partial misc source_port is not supported */
if (misc_mask->source_port && misc_mask->source_port != 0xffff)
return -EINVAL;
/* Partial misc source_eswitch_owner_vhca_id is not supported */
if (misc_mask->source_eswitch_owner_vhca_id &&
misc_mask->source_eswitch_owner_vhca_id != 0xffff)
return -EINVAL;
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_gvmi, misc_mask, source_port);
DR_STE_SET_MASK(src_gvmi_qp, bit_mask, source_qp, misc_mask, source_sqn);
misc_mask->source_eswitch_owner_vhca_id = 0;
return 0;
}
static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p)
u8 *tag)
{
struct dr_hw_ste_format *hw_ste = (struct dr_hw_ste_format *)hw_ste_p;
struct mlx5dr_match_misc *misc = &value->misc;
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_cmd_caps *caps;
u8 *bit_mask = sb->bit_mask;
u8 *tag = hw_ste->tag;
bool source_gvmi_set;
DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
@ -2339,19 +2268,15 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
return 0;
}
int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx)
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx)
{
int ret;
/* Set vhca_id_valid before we reset source_eswitch_owner_vhca_id */
sb->vhca_id_valid = mask->misc.source_eswitch_owner_vhca_id;
ret = dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
if (ret)
return ret;
dr_ste_build_src_gvmi_qpn_bit_mask(mask, sb->bit_mask);
sb->rx = rx;
sb->dmn = dmn;
@ -2359,6 +2284,4 @@ int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
sb->lu_type = MLX5DR_STE_LU_TYPE_SRC_GVMI_AND_QP;
sb->byte_mask = dr_ste_conv_bit_to_byte_mask(sb->bit_mask);
sb->ste_build_tag_func = &dr_ste_build_src_gvmi_qpn_tag;
return 0;
}


@ -194,7 +194,7 @@ struct mlx5dr_ste_build {
u8 bit_mask[DR_STE_SIZE_MASK];
int (*ste_build_tag_func)(struct mlx5dr_match_param *spec,
struct mlx5dr_ste_build *sb,
u8 *hw_ste_p);
u8 *tag);
};
struct mlx5dr_ste_htbl *
@ -227,7 +227,6 @@ void mlx5dr_ste_set_hit_gvmi(u8 *hw_ste_p, u16 gvmi);
void mlx5dr_ste_set_hit_addr(u8 *hw_ste, u64 icm_addr, u32 ht_size);
void mlx5dr_ste_always_miss_addr(struct mlx5dr_ste *ste, u64 miss_addr);
void mlx5dr_ste_set_bit_mask(u8 *hw_ste_p, u8 *bit_mask);
bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste);
bool mlx5dr_ste_is_last_in_rule(struct mlx5dr_matcher_rx_tx *nic_matcher,
u8 ste_location);
void mlx5dr_ste_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag);
@ -266,6 +265,11 @@ static inline void mlx5dr_ste_get(struct mlx5dr_ste *ste)
ste->refcount++;
}
static inline bool mlx5dr_ste_is_not_used(struct mlx5dr_ste *ste)
{
return !ste->refcount;
}
void mlx5dr_ste_set_hit_addr_by_next_htbl(u8 *hw_ste,
struct mlx5dr_ste_htbl *next_htbl);
bool mlx5dr_ste_equal_tag(void *src, void *dst);
@ -284,9 +288,9 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher,
struct mlx5dr_matcher_rx_tx *nic_matcher,
struct mlx5dr_match_param *value,
u8 *ste_arr);
int mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l2_src_des(struct mlx5dr_ste_build *builder,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
void mlx5dr_ste_build_eth_l3_ipv4_5_tuple(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
@ -342,10 +346,10 @@ void mlx5dr_ste_build_register_0(struct mlx5dr_ste_build *sb,
void mlx5dr_ste_build_register_1(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
bool inner, bool rx);
int mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx);
void mlx5dr_ste_build_src_gvmi_qpn(struct mlx5dr_ste_build *sb,
struct mlx5dr_match_param *mask,
struct mlx5dr_domain *dmn,
bool inner, bool rx);
void mlx5dr_ste_build_empty_always_hit(struct mlx5dr_ste_build *sb, bool rx);
/* Actions utils */
@ -793,6 +797,7 @@ struct mlx5dr_rule {
struct mlx5dr_rule_rx_tx rx;
struct mlx5dr_rule_rx_tx tx;
struct list_head rule_actions_list;
u32 flow_source;
};
void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste,
@ -991,7 +996,6 @@ struct mlx5dr_icm_chunk *
mlx5dr_icm_alloc_chunk(struct mlx5dr_icm_pool *pool,
enum mlx5dr_icm_chunk_size chunk_size);
void mlx5dr_icm_free_chunk(struct mlx5dr_icm_chunk *chunk);
bool mlx5dr_ste_is_not_valid_entry(u8 *p_hw_ste);
int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn,
struct mlx5dr_domain_rx_tx *nic_dmn,
struct mlx5dr_ste_htbl *htbl,


@ -487,7 +487,8 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns,
rule = mlx5dr_rule_create(group->fs_dr_matcher.dr_matcher,
&params,
num_actions,
actions);
actions,
fte->flow_context.flow_source);
if (!rule) {
err = -EINVAL;
goto free_actions;


@ -67,7 +67,8 @@ struct mlx5dr_rule *
mlx5dr_rule_create(struct mlx5dr_matcher *matcher,
struct mlx5dr_match_parameters *value,
size_t num_actions,
struct mlx5dr_action *actions[]);
struct mlx5dr_action *actions[],
u32 flow_source);
int mlx5dr_rule_destroy(struct mlx5dr_rule *rule);


@ -74,15 +74,16 @@ bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw);
bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw);
/* Reg C0 usage:
* Reg C0 = < ESW_VHCA_ID_BITS(8) | ESW_VPORT BITS(8) | ESW_CHAIN_TAG(16) >
* Reg C0 = < ESW_PFNUM_BITS(4) | ESW_VPORT BITS(12) | ESW_CHAIN_TAG(16) >
*
* Highest 8 bits of the reg c0 is the vhca_id, next 8 bits is vport_num,
* the rest (lowest 16 bits) is left for tc chain tag restoration.
* VHCA_ID + VPORT comprise the SOURCE_PORT matching.
* Highest 4 bits of the reg c0 is the PF_NUM (range 0-15), 12 bits of
* unique non-zero vport id (range 1-4095). The rest (lowest 16 bits) is left
* for tc chain tag restoration.
* PFNUM + VPORT comprise the SOURCE_PORT matching.
*/
#define ESW_VHCA_ID_BITS 8
#define ESW_VPORT_BITS 8
#define ESW_SOURCE_PORT_METADATA_BITS (ESW_VHCA_ID_BITS + ESW_VPORT_BITS)
#define ESW_VPORT_BITS 12
#define ESW_PFNUM_BITS 4
#define ESW_SOURCE_PORT_METADATA_BITS (ESW_PFNUM_BITS + ESW_VPORT_BITS)
#define ESW_SOURCE_PORT_METADATA_OFFSET (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_CHAIN_TAG_METADATA_BITS (32 - ESW_SOURCE_PORT_METADATA_BITS)
#define ESW_CHAIN_TAG_METADATA_MASK GENMASK(ESW_CHAIN_TAG_METADATA_BITS - 1,\