mirror of https://gitee.com/openkylin/linux.git

net/mlx5: convert to new udp_tunnel infrastructure

Allocate nic_info dynamically - n_entries is not constant.

Attach the tunnel offload info only to the uplink representor.
We expect the "main" netdev to be unregistered in switchdev
mode, and there to be only one uplink representor.

Drop the udp_tunnel_drop_rx_info() call, it was not there until
commit b3c2ed21c0
("net/mlx5e: Fix VXLAN configuration restore after function reload")
so the device doesn't need it, and core should handle reloads and
resets just fine.

v2:
 - don't drop the ndos on reprs, and register info on uplink repr.
v4:
 - Move netdev tunnel structure handling to en_main.c

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>

parent 966e505976
commit 18a2b7f969
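
For readers who only know the old ndo-based scheme: with the new infrastructure a driver no longer implements .ndo_udp_tunnel_add/.ndo_udp_tunnel_del handlers and its own workqueue; it describes its offload tables in a struct udp_tunnel_nic_info, and the udp_tunnel_nic core calls set_port()/unset_port() as tunnel sockets come and go, and can replay the table contents when the driver signals a reset via udp_tunnel_nic_reset_ntf(). The sketch below is only a reading aid for the diff that follows, not mlx5 code - the foo_* names and the table size are made up.

/* Illustrative sketch of the driver-side udp_tunnel_nic contract.
 * All foo_* identifiers are hypothetical.
 */
#include <linux/netdevice.h>
#include <net/udp_tunnel.h>

static int foo_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
				   unsigned int entry, struct udp_tunnel_info *ti)
{
	/* Program ntohs(ti->port) into the HW table here; returning an error
	 * tells the core this entry is not offloaded.
	 */
	return 0;
}

static int foo_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
				     unsigned int entry, struct udp_tunnel_info *ti)
{
	/* Remove the port from the HW table here. */
	return 0;
}

static const struct udp_tunnel_nic_info foo_udp_tunnels = {
	.set_port	= foo_udp_tunnel_set_port,
	.unset_port	= foo_udp_tunnel_unset_port,
	.flags		= UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
	.tables		= {
		/* one table, four VXLAN port slots (size chosen arbitrarily) */
		{ .n_entries = 4, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

static void foo_setup_netdev(struct net_device *dev)
{
	/* Must be set before register_netdev(); the core takes it from here. */
	dev->udp_tunnel_nic_info = &foo_udp_tunnels;
}

The mlx5 conversion below follows the same pattern, except that nic_info is embedded in struct mlx5e_priv and filled at runtime, because n_entries depends on the device's max_vxlan_udp_ports capability.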

drivers/net/ethernet/mellanox/mlx5/core/en.h

@@ -45,6 +45,7 @@
 #include <linux/mlx5/transobj.h>
 #include <linux/mlx5/fs.h>
 #include <linux/rhashtable.h>
+#include <net/udp_tunnel.h>
 #include <net/switchdev.h>
 #include <net/xdp.h>
 #include <linux/dim.h>
@@ -792,6 +793,7 @@ struct mlx5e_priv {
 	u16                        drop_rq_q_counter;
 	struct notifier_block      events_nb;

+	struct udp_tunnel_nic_info nic_info;
 #ifdef CONFIG_MLX5_CORE_EN_DCB
 	struct mlx5e_dcbx          dcbx;
 #endif
@@ -1012,6 +1014,7 @@ int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv);
 int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context);
 int mlx5e_change_mtu(struct net_device *netdev, int new_mtu,
		     mlx5e_fp_preactivate preactivate);
+void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv);

 /* ethtool helpers */
 void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
@@ -1080,8 +1083,6 @@ void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params,
 void mlx5e_rx_dim_work(struct work_struct *work);
 void mlx5e_tx_dim_work(struct work_struct *work);

-void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
-void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti);
 netdev_features_t mlx5e_features_check(struct sk_buff *skb,
				       struct net_device *netdev,
				       netdev_features_t features);

drivers/net/ethernet/mellanox/mlx5/core/en_main.c

@@ -4191,83 +4191,6 @@ int mlx5e_get_vf_stats(struct net_device *dev,
 }
 #endif

-struct mlx5e_vxlan_work {
-	struct work_struct work;
-	struct mlx5e_priv  *priv;
-	u16 port;
-};
-
-static void mlx5e_vxlan_add_work(struct work_struct *work)
-{
-	struct mlx5e_vxlan_work *vxlan_work =
-		container_of(work, struct mlx5e_vxlan_work, work);
-	struct mlx5e_priv *priv = vxlan_work->priv;
-	u16 port = vxlan_work->port;
-
-	mutex_lock(&priv->state_lock);
-	mlx5_vxlan_add_port(priv->mdev->vxlan, port);
-	mutex_unlock(&priv->state_lock);
-
-	kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_del_work(struct work_struct *work)
-{
-	struct mlx5e_vxlan_work *vxlan_work =
-		container_of(work, struct mlx5e_vxlan_work, work);
-	struct mlx5e_priv *priv = vxlan_work->priv;
-	u16 port = vxlan_work->port;
-
-	mutex_lock(&priv->state_lock);
-	mlx5_vxlan_del_port(priv->mdev->vxlan, port);
-	mutex_unlock(&priv->state_lock);
-	kfree(vxlan_work);
-}
-
-static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add)
-{
-	struct mlx5e_vxlan_work *vxlan_work;
-
-	vxlan_work = kmalloc(sizeof(*vxlan_work), GFP_ATOMIC);
-	if (!vxlan_work)
-		return;
-
-	if (add)
-		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_add_work);
-	else
-		INIT_WORK(&vxlan_work->work, mlx5e_vxlan_del_work);
-
-	vxlan_work->priv = priv;
-	vxlan_work->port = port;
-	queue_work(priv->wq, &vxlan_work->work);
-}
-
-void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-		return;
-
-	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
-		return;
-
-	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1);
-}
-
-void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti)
-{
-	struct mlx5e_priv *priv = netdev_priv(netdev);
-
-	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
-		return;
-
-	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
-		return;
-
-	mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 0);
-}
-
 static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						      struct sk_buff *skb,
						      netdev_features_t features)
@@ -4597,8 +4520,8 @@ const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_change_mtu          = mlx5e_change_nic_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
-	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
+	.ndo_udp_tunnel_add      = udp_tunnel_nic_add_port,
+	.ndo_udp_tunnel_del      = udp_tunnel_nic_del_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_bpf                 = mlx5e_xdp,
@@ -4869,6 +4792,39 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
	}
 }

+static int mlx5e_vxlan_set_port(struct net_device *netdev, unsigned int table,
+				unsigned int entry, struct udp_tunnel_info *ti)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	return mlx5_vxlan_add_port(priv->mdev->vxlan, ntohs(ti->port));
+}
+
+static int mlx5e_vxlan_unset_port(struct net_device *netdev, unsigned int table,
+				  unsigned int entry, struct udp_tunnel_info *ti)
+{
+	struct mlx5e_priv *priv = netdev_priv(netdev);
+
+	return mlx5_vxlan_del_port(priv->mdev->vxlan, ntohs(ti->port));
+}
+
+void mlx5e_vxlan_set_netdev_info(struct mlx5e_priv *priv)
+{
+	if (!mlx5_vxlan_allowed(priv->mdev->vxlan))
+		return;
+
+	priv->nic_info.set_port = mlx5e_vxlan_set_port;
+	priv->nic_info.unset_port = mlx5e_vxlan_unset_port;
+	priv->nic_info.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP |
+			       UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+	priv->nic_info.tables[0].tunnel_types = UDP_TUNNEL_TYPE_VXLAN;
+	/* Don't count the space hard-coded to the IANA port */
+	priv->nic_info.tables[0].n_entries =
+		mlx5_vxlan_max_udp_ports(priv->mdev) - 1;
+
+	priv->netdev->udp_tunnel_nic_info = &priv->nic_info;
+}
+
 static void mlx5e_build_nic_netdev(struct net_device *netdev)
 {
	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -4912,6 +4868,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->hw_features      |= NETIF_F_HW_VLAN_STAG_TX;

+	mlx5e_vxlan_set_netdev_info(priv);
+
	if (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev) ||
	    mlx5e_any_tunnel_proto_supported(mdev)) {
		netdev->hw_enc_features |= NETIF_F_HW_CSUM;
@@ -5217,8 +5175,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
-	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
-		udp_tunnel_get_rx_info(netdev);
+	udp_tunnel_nic_reset_ntf(priv->netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
 }
@@ -5233,8 +5190,6 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
-	if (mlx5_vxlan_allowed(priv->mdev->vxlan))
-		udp_tunnel_drop_rx_info(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();


drivers/net/ethernet/mellanox/mlx5/core/en_rep.c

@@ -658,8 +658,8 @@ static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
	.ndo_has_offload_stats   = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu          = mlx5e_uplink_rep_change_mtu,
-	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
+	.ndo_udp_tunnel_add      = udp_tunnel_nic_add_port,
+	.ndo_udp_tunnel_del      = udp_tunnel_nic_del_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
@@ -730,6 +730,7 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev)
		/* we want a persistent mac for the uplink rep */
		mlx5_query_mac_address(mdev, netdev->dev_addr);
		netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
+		mlx5e_vxlan_set_netdev_info(priv);
		mlx5e_dcbnl_build_rep_netdev(netdev);
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_rep;

drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c

@@ -42,21 +42,14 @@ struct mlx5_vxlan {
	struct mlx5_core_dev		*mdev;
	/* max_num_ports is usuallly 4, 16 buckets is more than enough */
	DECLARE_HASHTABLE(htable, 4);
-	int				num_ports;
	struct mutex                    sync_lock; /* sync add/del port HW operations */
 };

 struct mlx5_vxlan_port {
	struct hlist_node hlist;
-	refcount_t refcount;
	u16 udp_port;
 };

-static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
-{
-	return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
-}
-
 static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port)
 {
	u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {};
@@ -109,48 +102,24 @@ static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 p
 int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port)
 {
	struct mlx5_vxlan_port *vxlanp;
-	int ret = 0;
-
-	mutex_lock(&vxlan->sync_lock);
-	vxlanp = vxlan_lookup_port(vxlan, port);
-	if (vxlanp) {
-		refcount_inc(&vxlanp->refcount);
-		goto unlock;
-	}
-
-	if (vxlan->num_ports >= mlx5_vxlan_max_udp_ports(vxlan->mdev)) {
-		mlx5_core_info(vxlan->mdev,
-			       "UDP port (%d) not offloaded, max number of UDP ports (%d) are already offloaded\n",
-			       port, mlx5_vxlan_max_udp_ports(vxlan->mdev));
-		ret = -ENOSPC;
-		goto unlock;
-	}
-
-	ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
-	if (ret)
-		goto unlock;
+	int ret;

	vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL);
-	if (!vxlanp) {
-		ret = -ENOMEM;
-		goto err_delete_port;
+	if (!vxlanp)
+		return -ENOMEM;
+	vxlanp->udp_port = port;
+
+	ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port);
+	if (ret) {
+		kfree(vxlanp);
+		return ret;
	}

-	vxlanp->udp_port = port;
-	refcount_set(&vxlanp->refcount, 1);
-
+	mutex_lock(&vxlan->sync_lock);
	hash_add_rcu(vxlan->htable, &vxlanp->hlist, port);
-
-	vxlan->num_ports++;
	mutex_unlock(&vxlan->sync_lock);
+
	return 0;
-
-err_delete_port:
-	mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
-
-unlock:
-	mutex_unlock(&vxlan->sync_lock);
-	return ret;
 }

 int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
@@ -161,18 +130,15 @@ int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port)
	mutex_lock(&vxlan->sync_lock);

	vxlanp = vxlan_lookup_port(vxlan, port);
-	if (!vxlanp) {
+	if (WARN_ON(!vxlanp)) {
		ret = -ENOENT;
		goto out_unlock;
	}

-	if (refcount_dec_and_test(&vxlanp->refcount)) {
-		hash_del_rcu(&vxlanp->hlist);
-		synchronize_rcu();
-		mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
-		kfree(vxlanp);
-		vxlan->num_ports--;
-	}
+	hash_del_rcu(&vxlanp->hlist);
+	synchronize_rcu();
+	mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port);
+	kfree(vxlanp);

 out_unlock:
	mutex_unlock(&vxlan->sync_lock);

drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.h

@@ -37,6 +37,11 @@
 struct mlx5_vxlan;
 struct mlx5_vxlan_port;

+static inline u8 mlx5_vxlan_max_udp_ports(struct mlx5_core_dev *mdev)
+{
+	return MLX5_CAP_ETH(mdev, max_vxlan_udp_ports) ?: 4;
+}
+
 static inline bool mlx5_vxlan_allowed(struct mlx5_vxlan *vxlan)
 {
	/* not allowed reason is encoded in vxlan pointer as error,