mirror of https://gitee.com/openkylin/linux.git
Merge branch 'nfp-flower-improve-flower-resilience'
Jakub Kicinski says:

====================
nfp: flower: improve flower resilience

This series contains mostly changes which improve nfp flower offload's resilience, but are too large or risky to push into net.

Fred makes the driver's waits for flower FW responses uninterruptible, and a little longer (~40 ms). Pieter adds support for cards with multiple rule memories. John reworks the MAC offloads. He says:

> When potential tunnel end-point MACs are offloaded, they are assigned an
> index. This index may be associated with a port number, meaning that if a
> packet matches an offloaded MAC address on the card, then the ingress
> port for that MAC can also be verified. In the case of shared MACs (e.g.
> on a Linux bond) there may be situations where this index maps to only
> one of the ports that share the MAC.
>
> The idea of 'global' MAC indexes is supported: these bypass the check on
> ingress port on the NFP. The patchset tracks shared MACs and assigns
> global indexes to them. It also ensures that port-based indexes are
> re-applied if a single port becomes the only user of an offloaded MAC.
>
> Other patches in the set aim to tidy code without changing functionality.
> There is also a delete offload message introduced to ensure that MACs no
> longer in use in kernel space are removed from the firmware lookup tables.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
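As a rough illustration of the wait-primitive change referenced above — a minimal sketch, not code from this series; the demo_* names are invented:

#include <linux/wait.h>
#include <linux/jiffies.h>

static DECLARE_WAIT_QUEUE_HEAD(demo_wait_queue);
static bool demo_reply_seen;	/* set by the reply handler; made-up name */

/* Returns 0 once the reply arrives, -EIO on a genuine ~40 ms timeout.
 * Unlike wait_event_interruptible_timeout(), wait_event_timeout() does
 * not abort early when the task is signalled, so a signal delivered
 * during probe can no longer be mistaken for a firmware failure.
 */
static int demo_wait_for_reply(void)
{
	if (!wait_event_timeout(demo_wait_queue, demo_reply_seen,
				msecs_to_jiffies(40)))
		return -EIO;
	return 0;
}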
This commit is contained in:
commit 159882f42c
@@ -203,7 +203,7 @@ nfp_flower_cmsg_portreify_rx(struct nfp_app *app, struct sk_buff *skb)
 	}
 
 	atomic_inc(&priv->reify_replies);
-	wake_up_interruptible(&priv->reify_wait_queue);
+	wake_up(&priv->reify_wait_queue);
 }
 
 static void
@@ -97,6 +97,9 @@
 #define NFP_FLOWER_WORKQ_MAX_SKBS	30000
 
+/* Cmesg reply (empirical) timeout*/
+#define NFP_FL_REPLY_TIMEOUT		msecs_to_jiffies(40)
+
 #define nfp_flower_cmsg_warn(app, fmt, args...) \
 	do { \
 		if (net_ratelimit()) \
@@ -32,6 +32,71 @@ static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
 	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
 }
 
+static struct nfp_flower_non_repr_priv *
+nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_flower_non_repr_priv *entry;
+
+	ASSERT_RTNL();
+
+	list_for_each_entry(entry, &priv->non_repr_priv, list)
+		if (entry->netdev == netdev)
+			return entry;
+
+	return NULL;
+}
+
+void
+__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
+{
+	non_repr_priv->ref_count++;
+}
+
+struct nfp_flower_non_repr_priv *
+nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_flower_non_repr_priv *entry;
+
+	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
+	if (entry)
+		goto inc_ref;
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return NULL;
+
+	entry->netdev = netdev;
+	list_add(&entry->list, &priv->non_repr_priv);
+
+inc_ref:
+	__nfp_flower_non_repr_priv_get(entry);
+	return entry;
+}
+
+void
+__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
+{
+	if (--non_repr_priv->ref_count)
+		return;
+
+	list_del(&non_repr_priv->list);
+	kfree(non_repr_priv);
+}
+
+void
+nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
+{
+	struct nfp_flower_non_repr_priv *entry;
+
+	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
+	if (!entry)
+		return;
+
+	__nfp_flower_non_repr_priv_put(entry);
+}
+
 static enum nfp_repr_type
 nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
 {
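A minimal sketch of how the reference-counted helpers above are meant to be used — not code from this series; the demo_* names are invented, and callers hold rtnl_lock() since the lookup asserts RTNL:

/* One refcounted priv entry per non-repr device (e.g. a VXLAN or bond
 * netdev), no matter how many offloaded features reference it; the
 * entry frees itself when the last user calls the put helper.
 */
static int demo_ref_non_repr(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_get(app, netdev);
	if (!entry)
		return -ENOMEM;
	/* ... use entry->netdev, set entry->mac_offloaded, etc. ... */
	return 0;
}

static void demo_unref_non_repr(struct nfp_app *app, struct net_device *netdev)
{
	nfp_flower_non_repr_priv_put(app, netdev);	/* drops last ref, frees */
}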
@@ -107,16 +172,14 @@ static int
 nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
 {
 	struct nfp_flower_priv *priv = app->priv;
-	int err;
 
 	if (!tot_repl)
 		return 0;
 
 	lockdep_assert_held(&app->pf->lock);
-	err = wait_event_interruptible_timeout(priv->reify_wait_queue,
-					       atomic_read(replies) >= tot_repl,
-					       msecs_to_jiffies(10));
-	if (err <= 0) {
+	if (!wait_event_timeout(priv->reify_wait_queue,
+				atomic_read(replies) >= tot_repl,
+				NFP_FL_REPLY_TIMEOUT)) {
 		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
 		return -EIO;
 	}
@@ -223,6 +286,7 @@ nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
 
 		nfp_repr = netdev_priv(repr);
 		nfp_repr->app_priv = repr_priv;
+		repr_priv->nfp_repr = nfp_repr;
 
 		/* For now we only support 1 PF */
 		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);
@@ -337,6 +401,7 @@ nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
 
 		nfp_repr = netdev_priv(repr);
 		nfp_repr->app_priv = repr_priv;
+		repr_priv->nfp_repr = nfp_repr;
 
 		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
 		if (IS_ERR(port)) {
@@ -476,8 +541,8 @@ static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
 
 static int nfp_flower_init(struct nfp_app *app)
 {
+	u64 version, features, ctx_count, num_mems;
 	const struct nfp_pf *pf = app->pf;
-	u64 version, features, ctx_count;
 	struct nfp_flower_priv *app_priv;
 	int err;
 
@@ -502,6 +567,23 @@ static int nfp_flower_init(struct nfp_app *app)
 		return err;
 	}
 
+	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
+				     &err);
+	if (err) {
+		nfp_warn(app->cpp,
+			 "FlowerNIC: unsupported host context memory: %d\n",
+			 err);
+		err = 0;
+		num_mems = 1;
+	}
+
+	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
+		nfp_warn(app->cpp,
+			 "FlowerNIC: invalid host context memory: %llu\n",
+			 num_mems);
+		return -EINVAL;
+	}
+
 	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
 				     &err);
 	if (err) {
@@ -522,6 +604,8 @@ static int nfp_flower_init(struct nfp_app *app)
 	if (!app_priv)
 		return -ENOMEM;
 
+	app_priv->total_mem_units = num_mems;
+	app_priv->active_mem_unit = 0;
 	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
 	app->priv = app_priv;
 	app_priv->app = app;
@@ -533,7 +617,7 @@ static int nfp_flower_init(struct nfp_app *app)
 	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
 	spin_lock_init(&app_priv->mtu_conf.lock);
 
-	err = nfp_flower_metadata_init(app, ctx_count);
+	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
 	if (err)
 		goto err_free_app_priv;
 
@@ -558,6 +642,7 @@ static int nfp_flower_init(struct nfp_app *app)
 	}
 
 	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
+	INIT_LIST_HEAD(&app_priv->non_repr_priv);
 
 	return 0;
 
@@ -601,7 +686,7 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
 {
 	struct nfp_flower_priv *app_priv = app->priv;
 	struct nfp_repr *repr = netdev_priv(netdev);
-	int err, ack;
+	int err;
 
 	/* Only need to config FW for physical port MTU change. */
 	if (repr->port->type != NFP_PORT_PHYS_PORT)
@@ -628,11 +713,9 @@ nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
 	}
 
 	/* Wait for fw to ack the change. */
-	ack = wait_event_timeout(app_priv->mtu_conf.wait_q,
-				 nfp_flower_check_ack(app_priv),
-				 msecs_to_jiffies(10));
-
-	if (!ack) {
+	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
+				nfp_flower_check_ack(app_priv),
+				NFP_FL_REPLY_TIMEOUT)) {
 		spin_lock_bh(&app_priv->mtu_conf.lock);
 		app_priv->mtu_conf.requested_val = 0;
 		spin_unlock_bh(&app_priv->mtu_conf.lock);
@@ -20,6 +20,9 @@ struct nfp_fl_pre_lag;
 struct net_device;
 struct nfp_app;
 
+#define NFP_FL_STAT_ID_MU_NUM		GENMASK(31, 22)
+#define NFP_FL_STAT_ID_STAT		GENMASK(21, 0)
+
 #define NFP_FL_STATS_ELEM_RS		FIELD_SIZEOF(struct nfp_fl_stats_id, \
 						     init_unalloc)
 #define NFP_FLOWER_MASK_ENTRY_RS	256
@@ -53,6 +56,26 @@ struct nfp_fl_stats_id {
 	u8 repeated_em_count;
 };
 
+/**
+ * struct nfp_fl_tunnel_offloads - priv data for tunnel offloads
+ * @offloaded_macs:	Hashtable of the offloaded MAC addresses
+ * @ipv4_off_list:	List of IPv4 addresses to offload
+ * @neigh_off_list:	List of neighbour offloads
+ * @ipv4_off_lock:	Lock for the IPv4 address list
+ * @neigh_off_lock:	Lock for the neighbour address list
+ * @mac_off_ids:	IDA to manage id assignment for offloaded MACs
+ * @neigh_nb:		Notifier to monitor neighbour state
+ */
+struct nfp_fl_tunnel_offloads {
+	struct rhashtable offloaded_macs;
+	struct list_head ipv4_off_list;
+	struct list_head neigh_off_list;
+	struct mutex ipv4_off_lock;
+	spinlock_t neigh_off_lock;
+	struct ida mac_off_ids;
+	struct notifier_block neigh_nb;
+};
+
 /**
  * struct nfp_mtu_conf - manage MTU setting
  * @portnum:		NFP port number of repr with requested MTU change
@@ -113,23 +136,16 @@ struct nfp_fl_lag {
  *			processing
  * @cmsg_skbs_low:	List of lower priority skbs for control message
  *			processing
- * @nfp_mac_off_list:	List of MAC addresses to offload
- * @nfp_mac_index_list:	List of unique 8-bit indexes for non NFP netdevs
- * @nfp_ipv4_off_list:	List of IPv4 addresses to offload
- * @nfp_neigh_off_list:	List of neighbour offloads
- * @nfp_mac_off_lock:	Lock for the MAC address list
- * @nfp_mac_index_lock:	Lock for the MAC index list
- * @nfp_ipv4_off_lock:	Lock for the IPv4 address list
- * @nfp_neigh_off_lock:	Lock for the neighbour address list
- * @nfp_mac_off_ids:	IDA to manage id assignment for offloaded macs
- * @nfp_mac_off_count:	Number of MACs in address list
- * @nfp_tun_neigh_nb:	Notifier to monitor neighbour state
+ * @tun:		Tunnel offload data
  * @reify_replies:	atomically stores the number of replies received
  *			from firmware for repr reify
  * @reify_wait_queue:	wait queue for repr reify response counting
  * @mtu_conf:		Configuration of repr MTU value
  * @nfp_lag:		Link aggregation data block
  * @indr_block_cb_priv:	List of priv data passed to indirect block cbs
+ * @non_repr_priv:	List of offloaded non-repr ports and their priv data
+ * @active_mem_unit:	Current active memory unit for flower rules
+ * @total_mem_units:	Total number of available memory units for flower rules
  */
 struct nfp_flower_priv {
 	struct nfp_app *app;
@@ -147,30 +163,47 @@ struct nfp_flower_priv {
 	struct work_struct cmsg_work;
 	struct sk_buff_head cmsg_skbs_high;
 	struct sk_buff_head cmsg_skbs_low;
-	struct list_head nfp_mac_off_list;
-	struct list_head nfp_mac_index_list;
-	struct list_head nfp_ipv4_off_list;
-	struct list_head nfp_neigh_off_list;
-	struct mutex nfp_mac_off_lock;
-	struct mutex nfp_mac_index_lock;
-	struct mutex nfp_ipv4_off_lock;
-	spinlock_t nfp_neigh_off_lock;
-	struct ida nfp_mac_off_ids;
-	int nfp_mac_off_count;
-	struct notifier_block nfp_tun_neigh_nb;
+	struct nfp_fl_tunnel_offloads tun;
 	atomic_t reify_replies;
 	wait_queue_head_t reify_wait_queue;
 	struct nfp_mtu_conf mtu_conf;
 	struct nfp_fl_lag nfp_lag;
 	struct list_head indr_block_cb_priv;
+	struct list_head non_repr_priv;
+	unsigned int active_mem_unit;
+	unsigned int total_mem_units;
 };
 
 /**
  * struct nfp_flower_repr_priv - Flower APP per-repr priv data
+ * @nfp_repr:		Back pointer to nfp_repr
  * @lag_port_flags:	Extended port flags to record lag state of repr
+ * @mac_offloaded:	Flag indicating a MAC address is offloaded for repr
+ * @offloaded_mac_addr:	MAC address that has been offloaded for repr
+ * @mac_list:		List entry of reprs that share the same offloaded MAC
  */
 struct nfp_flower_repr_priv {
+	struct nfp_repr *nfp_repr;
 	unsigned long lag_port_flags;
+	bool mac_offloaded;
+	u8 offloaded_mac_addr[ETH_ALEN];
+	struct list_head mac_list;
 };
 
+/**
+ * struct nfp_flower_non_repr_priv - Priv data for non-repr offloaded ports
+ * @list:		List entry of offloaded reprs
+ * @netdev:		Pointer to non-repr net_device
+ * @ref_count:		Number of references held for this priv data
+ * @mac_offloaded:	Flag indicating a MAC address is offloaded for device
+ * @offloaded_mac_addr:	MAC address that has been offloaded for dev
+ */
+struct nfp_flower_non_repr_priv {
+	struct list_head list;
+	struct net_device *netdev;
+	int ref_count;
+	bool mac_offloaded;
+	u8 offloaded_mac_addr[ETH_ALEN];
+};
+
 struct nfp_fl_key_ls {
@@ -217,7 +250,8 @@ struct nfp_fl_stats_frame {
 	__be64 stats_cookie;
 };
 
-int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count);
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+			     unsigned int host_ctx_split);
 void nfp_flower_metadata_cleanup(struct nfp_app *app);
 
 int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
@@ -252,7 +286,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app);
 int nfp_tunnel_mac_event_handler(struct nfp_app *app,
 				 struct net_device *netdev,
 				 unsigned long event, void *ptr);
-void nfp_tunnel_write_macs(struct nfp_app *app);
 void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4);
 void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4);
 void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb);
@@ -273,4 +306,12 @@ int nfp_flower_reg_indir_block_handler(struct nfp_app *app,
 				       struct net_device *netdev,
 				       unsigned long event);
 
+void
+__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
+struct nfp_flower_non_repr_priv *
+nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev);
+void
+__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv);
+void
+nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev);
 #endif
@@ -403,9 +403,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app,
 		ext += sizeof(struct nfp_flower_ipv4_udp_tun);
 		msk += sizeof(struct nfp_flower_ipv4_udp_tun);
 
-		/* Configure tunnel end point MAC. */
-		nfp_tunnel_write_macs(app);
-
 		/* Store the tunnel destination in the rule data.
 		 * This must be present and be an exact match.
 		 */
@@ -4,6 +4,7 @@
 #include <linux/hash.h>
 #include <linux/hashtable.h>
 #include <linux/jhash.h>
+#include <linux/math64.h>
 #include <linux/vmalloc.h>
 #include <net/pkt_cls.h>
 
@@ -52,8 +53,17 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id)
 	freed_stats_id = priv->stats_ring_size;
 	/* Check for unallocated entries first. */
 	if (priv->stats_ids.init_unalloc > 0) {
-		*stats_context_id = priv->stats_ids.init_unalloc - 1;
-		priv->stats_ids.init_unalloc--;
+		if (priv->active_mem_unit == priv->total_mem_units) {
+			priv->stats_ids.init_unalloc--;
+			priv->active_mem_unit = 0;
+		}
+
+		*stats_context_id =
+			FIELD_PREP(NFP_FL_STAT_ID_STAT,
+				   priv->stats_ids.init_unalloc - 1) |
+			FIELD_PREP(NFP_FL_STAT_ID_MU_NUM,
+				   priv->active_mem_unit);
+		priv->active_mem_unit++;
 		return 0;
 	}
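A minimal sketch of the stats context ID packing used above, assuming the GENMASK layout added to main.h — the demo_* names are invented:

#include <linux/bitfield.h>
#include <linux/bits.h>

#define DEMO_STAT_ID_MU_NUM	GENMASK(31, 22)	/* mirrors NFP_FL_STAT_ID_MU_NUM */
#define DEMO_STAT_ID_STAT	GENMASK(21, 0)	/* mirrors NFP_FL_STAT_ID_STAT */

/* Memory unit in bits 31:22, per-unit stat index in bits 21:0.
 * For example, stat 5 in memory unit 2 becomes
 * (2 << 22) | 5 == 0x00800005.
 */
static u32 demo_pack_stats_id(u32 mem_unit, u32 stat)
{
	return FIELD_PREP(DEMO_STAT_ID_MU_NUM, mem_unit) |
	       FIELD_PREP(DEMO_STAT_ID_STAT, stat);
}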
@@ -381,10 +391,11 @@ const struct rhashtable_params nfp_flower_table_params = {
 	.automatic_shrinking	= true,
 };
 
-int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
+int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count,
+			     unsigned int host_num_mems)
 {
 	struct nfp_flower_priv *priv = app->priv;
-	int err;
+	int err, stats_size;
 
 	hash_init(priv->mask_table);
 
@@ -417,10 +428,12 @@ int nfp_flower_metadata_init(struct nfp_app *app, u64 host_ctx_count)
 	if (!priv->stats_ids.free_list.buf)
 		goto err_free_last_used;
 
-	priv->stats_ids.init_unalloc = host_ctx_count;
+	priv->stats_ids.init_unalloc = div_u64(host_ctx_count, host_num_mems);
 
-	priv->stats = kvmalloc_array(priv->stats_ring_size,
-				     sizeof(struct nfp_fl_stats), GFP_KERNEL);
+	stats_size = FIELD_PREP(NFP_FL_STAT_ID_STAT, host_ctx_count) |
+		     FIELD_PREP(NFP_FL_STAT_ID_MU_NUM, host_num_mems - 1);
+	priv->stats = kvmalloc_array(stats_size, sizeof(struct nfp_fl_stats),
+				     GFP_KERNEL);
 	if (!priv->stats)
 		goto err_free_ring_buf;
 
@@ -98,47 +98,51 @@ struct nfp_ipv4_addr_entry {
 	struct list_head list;
 };
 
-/**
- * struct nfp_tun_mac_addr - configure MAC address of tunnel EP on NFP
- * @reserved:	reserved for future use
- * @count:	number of MAC addresses in the message
- * @addresses.index:	index of MAC address in the lookup table
- * @addresses.addr:	interface MAC address
- * @addresses:	series of MACs to offload
- */
-struct nfp_tun_mac_addr {
-	__be16 reserved;
-	__be16 count;
-	struct index_mac_addr {
-		__be16 index;
-		u8 addr[ETH_ALEN];
-	} addresses[];
-};
+#define NFP_TUN_MAC_OFFLOAD_DEL_FLAG	0x2
 
 /**
- * struct nfp_tun_mac_offload_entry - list of MACs to offload
- * @index:	index of MAC address for offloading
+ * struct nfp_tun_mac_addr_offload - configure MAC address of tunnel EP on NFP
+ * @flags:	MAC address offload options
+ * @count:	number of MAC addresses in the message (should be 1)
+ * @index:	index of MAC address in the lookup table
  * @addr:	interface MAC address
- * @list:	list pointer
  */
-struct nfp_tun_mac_offload_entry {
+struct nfp_tun_mac_addr_offload {
+	__be16 flags;
+	__be16 count;
 	__be16 index;
 	u8 addr[ETH_ALEN];
-	struct list_head list;
 };
 
+enum nfp_flower_mac_offload_cmd {
+	NFP_TUNNEL_MAC_OFFLOAD_ADD =		0,
+	NFP_TUNNEL_MAC_OFFLOAD_DEL =		1,
+	NFP_TUNNEL_MAC_OFFLOAD_MOD =		2,
+};
+
 #define NFP_MAX_MAC_INDEX 0xff
 
 /**
- * struct nfp_tun_mac_non_nfp_idx - converts non NFP netdev ifindex to 8-bit id
- * @ifindex:	netdev ifindex of the device
- * @index:	index of netdevs mac on NFP
- * @list:	list pointer
+ * struct nfp_tun_offloaded_mac - hashtable entry for an offloaded MAC
+ * @ht_node:	Hashtable entry
+ * @addr:	Offloaded MAC address
+ * @index:	Offloaded index for given MAC address
+ * @ref_count:	Number of devs using this MAC address
+ * @repr_list:	List of reprs sharing this MAC address
 */
-struct nfp_tun_mac_non_nfp_idx {
-	int ifindex;
-	u8 index;
-	struct list_head list;
+struct nfp_tun_offloaded_mac {
+	struct rhash_head ht_node;
+	u8 addr[ETH_ALEN];
+	u16 index;
+	int ref_count;
+	struct list_head repr_list;
 };
 
+static const struct rhashtable_params offloaded_macs_params = {
+	.key_offset	= offsetof(struct nfp_tun_offloaded_mac, addr),
+	.head_offset	= offsetof(struct nfp_tun_offloaded_mac, ht_node),
+	.key_len	= ETH_ALEN,
+	.automatic_shrinking	= true,
+};
+
 void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb)
@@ -205,15 +209,15 @@ static bool nfp_tun_has_route(struct nfp_app *app, __be32 ipv4_addr)
 	struct nfp_ipv4_route_entry *entry;
 	struct list_head *ptr, *storage;
 
-	spin_lock_bh(&priv->nfp_neigh_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+	spin_lock_bh(&priv->tun.neigh_off_lock);
+	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
 		if (entry->ipv4_addr == ipv4_addr) {
-			spin_unlock_bh(&priv->nfp_neigh_off_lock);
+			spin_unlock_bh(&priv->tun.neigh_off_lock);
 			return true;
 		}
 	}
-	spin_unlock_bh(&priv->nfp_neigh_off_lock);
+	spin_unlock_bh(&priv->tun.neigh_off_lock);
 	return false;
 }
 
@@ -223,24 +227,24 @@ static void nfp_tun_add_route_to_cache(struct nfp_app *app, __be32 ipv4_addr)
 	struct nfp_ipv4_route_entry *entry;
 	struct list_head *ptr, *storage;
 
-	spin_lock_bh(&priv->nfp_neigh_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+	spin_lock_bh(&priv->tun.neigh_off_lock);
+	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
 		if (entry->ipv4_addr == ipv4_addr) {
-			spin_unlock_bh(&priv->nfp_neigh_off_lock);
+			spin_unlock_bh(&priv->tun.neigh_off_lock);
 			return;
 		}
 	}
 	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
 	if (!entry) {
-		spin_unlock_bh(&priv->nfp_neigh_off_lock);
+		spin_unlock_bh(&priv->tun.neigh_off_lock);
 		nfp_flower_cmsg_warn(app, "Mem error when storing new route.\n");
 		return;
 	}
 
 	entry->ipv4_addr = ipv4_addr;
-	list_add_tail(&entry->list, &priv->nfp_neigh_off_list);
-	spin_unlock_bh(&priv->nfp_neigh_off_lock);
+	list_add_tail(&entry->list, &priv->tun.neigh_off_list);
+	spin_unlock_bh(&priv->tun.neigh_off_lock);
 }
 
 static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
@@ -249,8 +253,8 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
 	struct nfp_ipv4_route_entry *entry;
 	struct list_head *ptr, *storage;
 
-	spin_lock_bh(&priv->nfp_neigh_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+	spin_lock_bh(&priv->tun.neigh_off_lock);
+	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_route_entry, list);
 		if (entry->ipv4_addr == ipv4_addr) {
 			list_del(&entry->list);
@@ -258,7 +262,7 @@ static void nfp_tun_del_route_from_cache(struct nfp_app *app, __be32 ipv4_addr)
 			break;
 		}
 	}
-	spin_unlock_bh(&priv->nfp_neigh_off_lock);
+	spin_unlock_bh(&priv->tun.neigh_off_lock);
 }
 
 static void
@@ -326,7 +330,7 @@ nfp_tun_neigh_event_handler(struct notifier_block *nb, unsigned long event,
 	if (!nfp_netdev_is_nfp_repr(n->dev))
 		return NOTIFY_DONE;
 
-	app_priv = container_of(nb, struct nfp_flower_priv, nfp_tun_neigh_nb);
+	app_priv = container_of(nb, struct nfp_flower_priv, tun.neigh_nb);
 	app = app_priv->app;
 
 	/* Only concerned with changes to routes already added to NFP. */
@@ -401,11 +405,11 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app)
 	int count;
 
 	memset(&payload, 0, sizeof(struct nfp_tun_ipv4_addr));
-	mutex_lock(&priv->nfp_ipv4_off_lock);
+	mutex_lock(&priv->tun.ipv4_off_lock);
 	count = 0;
-	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
 		if (count >= NFP_FL_IPV4_ADDRS_MAX) {
-			mutex_unlock(&priv->nfp_ipv4_off_lock);
+			mutex_unlock(&priv->tun.ipv4_off_lock);
 			nfp_flower_cmsg_warn(app, "IPv4 offload exceeds limit.\n");
 			return;
 		}
@@ -413,7 +417,7 @@ static void nfp_tun_write_ipv4_list(struct nfp_app *app)
 		payload.ipv4_addr[count++] = entry->ipv4_addr;
 	}
 	payload.count = cpu_to_be32(count);
-	mutex_unlock(&priv->nfp_ipv4_off_lock);
+	mutex_unlock(&priv->tun.ipv4_off_lock);
 
 	nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_IPS,
 				 sizeof(struct nfp_tun_ipv4_addr),
@@ -426,26 +430,26 @@ void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4)
 	struct nfp_ipv4_addr_entry *entry;
 	struct list_head *ptr, *storage;
 
-	mutex_lock(&priv->nfp_ipv4_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+	mutex_lock(&priv->tun.ipv4_off_lock);
+	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
 		if (entry->ipv4_addr == ipv4) {
 			entry->ref_count++;
-			mutex_unlock(&priv->nfp_ipv4_off_lock);
+			mutex_unlock(&priv->tun.ipv4_off_lock);
 			return;
 		}
 	}
 
 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
 	if (!entry) {
-		mutex_unlock(&priv->nfp_ipv4_off_lock);
+		mutex_unlock(&priv->tun.ipv4_off_lock);
 		nfp_flower_cmsg_warn(app, "Mem error when offloading IP address.\n");
 		return;
 	}
 	entry->ipv4_addr = ipv4;
 	entry->ref_count = 1;
-	list_add_tail(&entry->list, &priv->nfp_ipv4_off_list);
-	mutex_unlock(&priv->nfp_ipv4_off_lock);
+	list_add_tail(&entry->list, &priv->tun.ipv4_off_list);
+	mutex_unlock(&priv->tun.ipv4_off_lock);
 
 	nfp_tun_write_ipv4_list(app);
 }
@@ -456,8 +460,8 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
 	struct nfp_ipv4_addr_entry *entry;
 	struct list_head *ptr, *storage;
 
-	mutex_lock(&priv->nfp_ipv4_off_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+	mutex_lock(&priv->tun.ipv4_off_lock);
+	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
 		entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
 		if (entry->ipv4_addr == ipv4) {
 			entry->ref_count--;
@@ -468,191 +472,357 @@ void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4)
 			break;
 		}
 	}
-	mutex_unlock(&priv->nfp_ipv4_off_lock);
+	mutex_unlock(&priv->tun.ipv4_off_lock);
 
 	nfp_tun_write_ipv4_list(app);
 }
 
-void nfp_tunnel_write_macs(struct nfp_app *app)
+static int
+__nfp_tunnel_offload_mac(struct nfp_app *app, u8 *mac, u16 idx, bool del)
 {
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_tun_mac_offload_entry *entry;
-	struct nfp_tun_mac_addr *payload;
-	struct list_head *ptr, *storage;
-	int mac_count, err, pay_size;
+	struct nfp_tun_mac_addr_offload payload;
 
-	mutex_lock(&priv->nfp_mac_off_lock);
-	if (!priv->nfp_mac_off_count) {
-		mutex_unlock(&priv->nfp_mac_off_lock);
-		return;
-	}
+	memset(&payload, 0, sizeof(payload));
 
-	pay_size = sizeof(struct nfp_tun_mac_addr) +
-		   sizeof(struct index_mac_addr) * priv->nfp_mac_off_count;
+	if (del)
+		payload.flags = cpu_to_be16(NFP_TUN_MAC_OFFLOAD_DEL_FLAG);
 
-	payload = kzalloc(pay_size, GFP_KERNEL);
-	if (!payload) {
-		mutex_unlock(&priv->nfp_mac_off_lock);
-		return;
-	}
+	/* FW supports multiple MACs per cmsg but restrict to single. */
+	payload.count = cpu_to_be16(1);
+	payload.index = cpu_to_be16(idx);
+	ether_addr_copy(payload.addr, mac);
 
-	payload->count = cpu_to_be16(priv->nfp_mac_off_count);
-
-	mac_count = 0;
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
-		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
-				   list);
-		payload->addresses[mac_count].index = entry->index;
-		ether_addr_copy(payload->addresses[mac_count].addr,
-				entry->addr);
-		mac_count++;
-	}
-
-	err = nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
-				       pay_size, payload, GFP_KERNEL);
-
-	kfree(payload);
-
-	if (err) {
-		mutex_unlock(&priv->nfp_mac_off_lock);
-		/* Write failed so retain list for future retry. */
-		return;
-	}
-
-	/* If list was successfully offloaded, flush it. */
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
-		entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
-				   list);
-		list_del(&entry->list);
-		kfree(entry);
-	}
-
-	priv->nfp_mac_off_count = 0;
-	mutex_unlock(&priv->nfp_mac_off_lock);
+	return nfp_flower_xmit_tun_conf(app, NFP_FLOWER_CMSG_TYPE_TUN_MAC,
+					sizeof(struct nfp_tun_mac_addr_offload),
+					&payload, GFP_KERNEL);
 }
 
-static int nfp_tun_get_mac_idx(struct nfp_app *app, int ifindex)
+static bool nfp_tunnel_port_is_phy_repr(int port)
 {
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_tun_mac_non_nfp_idx *entry;
-	struct list_head *ptr, *storage;
-	int idx;
-
-	mutex_lock(&priv->nfp_mac_index_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
-		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
-		if (entry->ifindex == ifindex) {
-			idx = entry->index;
-			mutex_unlock(&priv->nfp_mac_index_lock);
-			return idx;
-		}
-	}
-
-	idx = ida_simple_get(&priv->nfp_mac_off_ids, 0,
-			     NFP_MAX_MAC_INDEX, GFP_KERNEL);
-	if (idx < 0) {
-		mutex_unlock(&priv->nfp_mac_index_lock);
-		return idx;
-	}
-
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
-		mutex_unlock(&priv->nfp_mac_index_lock);
-		return -ENOMEM;
-	}
-	entry->ifindex = ifindex;
-	entry->index = idx;
-	list_add_tail(&entry->list, &priv->nfp_mac_index_list);
-	mutex_unlock(&priv->nfp_mac_index_lock);
-
-	return idx;
-}
-
-static void nfp_tun_del_mac_idx(struct nfp_app *app, int ifindex)
-{
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_tun_mac_non_nfp_idx *entry;
-	struct list_head *ptr, *storage;
-
-	mutex_lock(&priv->nfp_mac_index_lock);
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
-		entry = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx, list);
-		if (entry->ifindex == ifindex) {
-			ida_simple_remove(&priv->nfp_mac_off_ids,
-					  entry->index);
-			list_del(&entry->list);
-			kfree(entry);
-			break;
-		}
-	}
-	mutex_unlock(&priv->nfp_mac_index_lock);
-}
-
-static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev,
-					    struct nfp_app *app)
-{
-	struct nfp_flower_priv *priv = app->priv;
-	struct nfp_tun_mac_offload_entry *entry;
-	u16 nfp_mac_idx;
-	int port = 0;
-
-	/* Check if MAC should be offloaded. */
-	if (!is_valid_ether_addr(netdev->dev_addr))
-		return;
-
-	if (nfp_netdev_is_nfp_repr(netdev))
-		port = nfp_repr_get_port_id(netdev);
-	else if (!nfp_fl_is_netdev_to_offload(netdev))
-		return;
-
-	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
-		nfp_flower_cmsg_warn(app, "Mem fail when offloading MAC.\n");
-		return;
-	}
-
 	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
-	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT) {
-		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
-	} else if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port) ==
-		   NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT) {
-		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port);
-		nfp_mac_idx = port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT;
-	} else {
-		/* Must assign our own unique 8-bit index. */
-		int idx = nfp_tun_get_mac_idx(app, netdev->ifindex);
+	    NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT)
+		return true;
 
-		if (idx < 0) {
-			nfp_flower_cmsg_warn(app, "Can't assign non-repr MAC index.\n");
-			kfree(entry);
-			return;
-		}
-		nfp_mac_idx = idx << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+	return false;
 }
 
+static u16 nfp_tunnel_get_mac_idx_from_phy_port_id(int port)
+{
+	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
+}
+
+static u16 nfp_tunnel_get_global_mac_idx_from_ida(int id)
+{
+	return id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+}
+
+static int nfp_tunnel_get_ida_from_global_mac_idx(u16 nfp_mac_idx)
+{
+	return nfp_mac_idx >> 8;
+}
+
+static bool nfp_tunnel_is_mac_idx_global(u16 nfp_mac_idx)
+{
+	return (nfp_mac_idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
+}
+
+static struct nfp_tun_offloaded_mac *
+nfp_tunnel_lookup_offloaded_macs(struct nfp_app *app, u8 *mac)
+{
+	struct nfp_flower_priv *priv = app->priv;
+
+	return rhashtable_lookup_fast(&priv->tun.offloaded_macs, mac,
+				      offloaded_macs_params);
+}
+
+static void
+nfp_tunnel_offloaded_macs_inc_ref_and_link(struct nfp_tun_offloaded_mac *entry,
+					   struct net_device *netdev, bool mod)
+{
+	if (nfp_netdev_is_nfp_repr(netdev)) {
+		struct nfp_flower_repr_priv *repr_priv;
+		struct nfp_repr *repr;
+
+		repr = netdev_priv(netdev);
+		repr_priv = repr->app_priv;
+
+		/* If modifing MAC, remove repr from old list first. */
+		if (mod)
+			list_del(&repr_priv->mac_list);
+
+		list_add_tail(&repr_priv->mac_list, &entry->repr_list);
 	}
 
-	entry->index = cpu_to_be16(nfp_mac_idx);
-	ether_addr_copy(entry->addr, netdev->dev_addr);
+	entry->ref_count++;
 }
 
-	mutex_lock(&priv->nfp_mac_off_lock);
-	priv->nfp_mac_off_count++;
-	list_add_tail(&entry->list, &priv->nfp_mac_off_list);
-	mutex_unlock(&priv->nfp_mac_off_lock);
+static int
+nfp_tunnel_add_shared_mac(struct nfp_app *app, struct net_device *netdev,
+			  int port, bool mod)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	int ida_idx = NFP_MAX_MAC_INDEX, err;
+	struct nfp_tun_offloaded_mac *entry;
+	u16 nfp_mac_idx = 0;
+
+	entry = nfp_tunnel_lookup_offloaded_macs(app, netdev->dev_addr);
+	if (entry && nfp_tunnel_is_mac_idx_global(entry->index)) {
+		nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
+		return 0;
+	}
+
+	/* Assign a global index if non-repr or MAC address is now shared. */
+	if (entry || !port) {
+		ida_idx = ida_simple_get(&priv->tun.mac_off_ids, 0,
+					 NFP_MAX_MAC_INDEX, GFP_KERNEL);
+		if (ida_idx < 0)
+			return ida_idx;
+
+		nfp_mac_idx = nfp_tunnel_get_global_mac_idx_from_ida(ida_idx);
+	} else {
+		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+	}
+
+	if (!entry) {
+		entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			err = -ENOMEM;
+			goto err_free_ida;
+		}
+
+		ether_addr_copy(entry->addr, netdev->dev_addr);
+		INIT_LIST_HEAD(&entry->repr_list);
+
+		if (rhashtable_insert_fast(&priv->tun.offloaded_macs,
+					   &entry->ht_node,
+					   offloaded_macs_params)) {
+			err = -ENOMEM;
+			goto err_free_entry;
+		}
+	}
+
+	err = __nfp_tunnel_offload_mac(app, netdev->dev_addr,
+				       nfp_mac_idx, false);
+	if (err) {
+		/* If not shared then free. */
+		if (!entry->ref_count)
+			goto err_remove_hash;
+		goto err_free_ida;
+	}
+
+	entry->index = nfp_mac_idx;
+	nfp_tunnel_offloaded_macs_inc_ref_and_link(entry, netdev, mod);
+
+	return 0;
+
+err_remove_hash:
+	rhashtable_remove_fast(&priv->tun.offloaded_macs, &entry->ht_node,
+			       offloaded_macs_params);
+err_free_entry:
+	kfree(entry);
+err_free_ida:
+	if (ida_idx != NFP_MAX_MAC_INDEX)
+		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+
+	return err;
+}
+
+static int
+nfp_tunnel_del_shared_mac(struct nfp_app *app, struct net_device *netdev,
+			  u8 *mac, bool mod)
+{
+	struct nfp_flower_priv *priv = app->priv;
+	struct nfp_flower_repr_priv *repr_priv;
+	struct nfp_tun_offloaded_mac *entry;
+	struct nfp_repr *repr;
+	int ida_idx;
+
+	entry = nfp_tunnel_lookup_offloaded_macs(app, mac);
+	if (!entry)
+		return 0;
+
+	entry->ref_count--;
+	/* If del is part of a mod then mac_list is still in use elsewheree. */
+	if (nfp_netdev_is_nfp_repr(netdev) && !mod) {
+		repr = netdev_priv(netdev);
+		repr_priv = repr->app_priv;
+		list_del(&repr_priv->mac_list);
+	}
+
+	/* If MAC is now used by 1 repr set the offloaded MAC index to port. */
+	if (entry->ref_count == 1 && list_is_singular(&entry->repr_list)) {
+		u16 nfp_mac_idx;
+		int port, err;
+
+		repr_priv = list_first_entry(&entry->repr_list,
+					     struct nfp_flower_repr_priv,
+					     mac_list);
+		repr = repr_priv->nfp_repr;
+		port = nfp_repr_get_port_id(repr->netdev);
+		nfp_mac_idx = nfp_tunnel_get_mac_idx_from_phy_port_id(port);
+
+		err = __nfp_tunnel_offload_mac(app, mac, nfp_mac_idx, false);
+		if (err) {
+			nfp_flower_cmsg_warn(app, "MAC offload index revert failed on %s.\n",
+					     netdev_name(netdev));
+			return 0;
+		}
+
+		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
+		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+		entry->index = nfp_mac_idx;
+		return 0;
+	}
+
+	if (entry->ref_count)
+		return 0;
+
+	WARN_ON_ONCE(rhashtable_remove_fast(&priv->tun.offloaded_macs,
+					    &entry->ht_node,
+					    offloaded_macs_params));
+	/* If MAC has global ID then extract and free the ida entry. */
+	if (nfp_tunnel_is_mac_idx_global(entry->index)) {
+		ida_idx = nfp_tunnel_get_ida_from_global_mac_idx(entry->index);
+		ida_simple_remove(&priv->tun.mac_off_ids, ida_idx);
+	}
+
+	kfree(entry);
+
+	return __nfp_tunnel_offload_mac(app, mac, 0, true);
+}
+
+static int
+nfp_tunnel_offload_mac(struct nfp_app *app, struct net_device *netdev,
+		       enum nfp_flower_mac_offload_cmd cmd)
+{
+	struct nfp_flower_non_repr_priv *nr_priv = NULL;
+	bool non_repr = false, *mac_offloaded;
+	u8 *off_mac = NULL;
+	int err, port = 0;
+
+	if (nfp_netdev_is_nfp_repr(netdev)) {
+		struct nfp_flower_repr_priv *repr_priv;
+		struct nfp_repr *repr;
+
+		repr = netdev_priv(netdev);
+		if (repr->app != app)
+			return 0;
+
+		repr_priv = repr->app_priv;
+		mac_offloaded = &repr_priv->mac_offloaded;
+		off_mac = &repr_priv->offloaded_mac_addr[0];
+		port = nfp_repr_get_port_id(netdev);
+		if (!nfp_tunnel_port_is_phy_repr(port))
+			return 0;
+	} else if (nfp_fl_is_netdev_to_offload(netdev)) {
+		nr_priv = nfp_flower_non_repr_priv_get(app, netdev);
+		if (!nr_priv)
+			return -ENOMEM;
+
+		mac_offloaded = &nr_priv->mac_offloaded;
+		off_mac = &nr_priv->offloaded_mac_addr[0];
+		non_repr = true;
+	} else {
+		return 0;
+	}
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		err = -EINVAL;
+		goto err_put_non_repr_priv;
+	}
+
+	if (cmd == NFP_TUNNEL_MAC_OFFLOAD_MOD && !*mac_offloaded)
+		cmd = NFP_TUNNEL_MAC_OFFLOAD_ADD;
+
+	switch (cmd) {
+	case NFP_TUNNEL_MAC_OFFLOAD_ADD:
+		err = nfp_tunnel_add_shared_mac(app, netdev, port, false);
+		if (err)
+			goto err_put_non_repr_priv;
+
+		if (non_repr)
+			__nfp_flower_non_repr_priv_get(nr_priv);
+
+		*mac_offloaded = true;
+		ether_addr_copy(off_mac, netdev->dev_addr);
+		break;
+	case NFP_TUNNEL_MAC_OFFLOAD_DEL:
+		/* Only attempt delete if add was successful. */
+		if (!*mac_offloaded)
+			break;
+
+		if (non_repr)
+			__nfp_flower_non_repr_priv_put(nr_priv);
+
+		*mac_offloaded = false;
+
+		err = nfp_tunnel_del_shared_mac(app, netdev, netdev->dev_addr,
+						false);
+		if (err)
+			goto err_put_non_repr_priv;
+
+		break;
+	case NFP_TUNNEL_MAC_OFFLOAD_MOD:
+		/* Ignore if changing to the same address. */
+		if (ether_addr_equal(netdev->dev_addr, off_mac))
+			break;
+
+		err = nfp_tunnel_add_shared_mac(app, netdev, port, true);
+		if (err)
+			goto err_put_non_repr_priv;
+
+		/* Delete the previous MAC address. */
+		err = nfp_tunnel_del_shared_mac(app, netdev, off_mac, true);
+		if (err)
+			nfp_flower_cmsg_warn(app, "Failed to remove offload of replaced MAC addr on %s.\n",
+					     netdev_name(netdev));
+
+		ether_addr_copy(off_mac, netdev->dev_addr);
+		break;
+	default:
+		err = -EINVAL;
+		goto err_put_non_repr_priv;
+	}
+
+	if (non_repr)
+		__nfp_flower_non_repr_priv_put(nr_priv);
+
+	return 0;
+
+err_put_non_repr_priv:
+	if (non_repr)
+		__nfp_flower_non_repr_priv_put(nr_priv);
+
+	return err;
+}
+
 int nfp_tunnel_mac_event_handler(struct nfp_app *app,
 				 struct net_device *netdev,
 				 unsigned long event, void *ptr)
 {
-	if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) {
-		/* If non-nfp netdev then free its offload index. */
-		if (nfp_fl_is_netdev_to_offload(netdev))
-			nfp_tun_del_mac_idx(app, netdev->ifindex);
-	} else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR ||
-		   event == NETDEV_REGISTER) {
-		nfp_tun_add_to_mac_offload_list(netdev, app);
+	int err;
 
-		/* Force a list write to keep NFP up to date. */
-		nfp_tunnel_write_macs(app);
+	if (event == NETDEV_DOWN) {
+		err = nfp_tunnel_offload_mac(app, netdev,
+					     NFP_TUNNEL_MAC_OFFLOAD_DEL);
+		if (err)
+			nfp_flower_cmsg_warn(app, "Failed to delete offload MAC on %s.\n",
+					     netdev_name(netdev));
+	} else if (event == NETDEV_UP) {
+		err = nfp_tunnel_offload_mac(app, netdev,
+					     NFP_TUNNEL_MAC_OFFLOAD_ADD);
+		if (err)
+			nfp_flower_cmsg_warn(app, "Failed to offload MAC on %s.\n",
+					     netdev_name(netdev));
+	} else if (event == NETDEV_CHANGEADDR) {
+		/* Only offload addr change if netdev is already up. */
+		if (!(netdev->flags & IFF_UP))
+			return NOTIFY_OK;
+
+		err = nfp_tunnel_offload_mac(app, netdev,
+					     NFP_TUNNEL_MAC_OFFLOAD_MOD);
+		if (err)
+			nfp_flower_cmsg_warn(app, "Failed to offload MAC change on %s.\n",
+					     netdev_name(netdev));
 	}
 	return NOTIFY_OK;
 }
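A minimal sketch of the two MAC index flavours this rework juggles — the demo_* names are invented; the port-type constants are the ones used in the hunk above:

/* The low byte of a MAC index carries the port type; the upper bits
 * carry either a port ID (port-based index, so the NFP also verifies
 * the ingress port) or an IDA-allocated ID (global index, ingress-port
 * check bypassed, used when several netdevs such as bonded reprs share
 * one MAC).
 */
static u16 demo_port_based_mac_idx(int port)
{
	return port << 8 | NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT;
}

static u16 demo_global_mac_idx(int ida_id)
{
	return ida_id << 8 | NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}

static bool demo_mac_idx_is_global(u16 idx)
{
	return (idx & 0xff) == NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT;
}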
@@ -660,68 +830,62 @@ int nfp_tunnel_mac_event_handler(struct nfp_app *app,
 int nfp_tunnel_config_start(struct nfp_app *app)
 {
 	struct nfp_flower_priv *priv = app->priv;
+	int err;
 
-	/* Initialise priv data for MAC offloading. */
-	priv->nfp_mac_off_count = 0;
-	mutex_init(&priv->nfp_mac_off_lock);
-	INIT_LIST_HEAD(&priv->nfp_mac_off_list);
-	mutex_init(&priv->nfp_mac_index_lock);
-	INIT_LIST_HEAD(&priv->nfp_mac_index_list);
-	ida_init(&priv->nfp_mac_off_ids);
+	/* Initialise rhash for MAC offload tracking. */
+	err = rhashtable_init(&priv->tun.offloaded_macs,
+			      &offloaded_macs_params);
+	if (err)
+		return err;
+
+	ida_init(&priv->tun.mac_off_ids);
 
 	/* Initialise priv data for IPv4 offloading. */
-	mutex_init(&priv->nfp_ipv4_off_lock);
-	INIT_LIST_HEAD(&priv->nfp_ipv4_off_list);
+	mutex_init(&priv->tun.ipv4_off_lock);
+	INIT_LIST_HEAD(&priv->tun.ipv4_off_list);
 
 	/* Initialise priv data for neighbour offloading. */
-	spin_lock_init(&priv->nfp_neigh_off_lock);
-	INIT_LIST_HEAD(&priv->nfp_neigh_off_list);
-	priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
+	spin_lock_init(&priv->tun.neigh_off_lock);
+	INIT_LIST_HEAD(&priv->tun.neigh_off_list);
+	priv->tun.neigh_nb.notifier_call = nfp_tun_neigh_event_handler;
 
-	return register_netevent_notifier(&priv->nfp_tun_neigh_nb);
+	err = register_netevent_notifier(&priv->tun.neigh_nb);
+	if (err) {
+		rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
+					    nfp_check_rhashtable_empty, NULL);
+		return err;
+	}
+
+	return 0;
 }
 
 void nfp_tunnel_config_stop(struct nfp_app *app)
 {
-	struct nfp_tun_mac_offload_entry *mac_entry;
 	struct nfp_flower_priv *priv = app->priv;
 	struct nfp_ipv4_route_entry *route_entry;
-	struct nfp_tun_mac_non_nfp_idx *mac_idx;
 	struct nfp_ipv4_addr_entry *ip_entry;
 	struct list_head *ptr, *storage;
 
-	unregister_netevent_notifier(&priv->nfp_tun_neigh_nb);
+	unregister_netevent_notifier(&priv->tun.neigh_nb);
 
-	/* Free any memory that may be occupied by MAC list. */
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_off_list) {
-		mac_entry = list_entry(ptr, struct nfp_tun_mac_offload_entry,
-				       list);
-		list_del(&mac_entry->list);
-		kfree(mac_entry);
-	}
-
-	/* Free any memory that may be occupied by MAC index list. */
-	list_for_each_safe(ptr, storage, &priv->nfp_mac_index_list) {
-		mac_idx = list_entry(ptr, struct nfp_tun_mac_non_nfp_idx,
-				     list);
-		list_del(&mac_idx->list);
-		kfree(mac_idx);
-	}
-
-	ida_destroy(&priv->nfp_mac_off_ids);
+	ida_destroy(&priv->tun.mac_off_ids);
 
 	/* Free any memory that may be occupied by ipv4 list. */
-	list_for_each_safe(ptr, storage, &priv->nfp_ipv4_off_list) {
+	list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
 		ip_entry = list_entry(ptr, struct nfp_ipv4_addr_entry, list);
 		list_del(&ip_entry->list);
 		kfree(ip_entry);
 	}
 
 	/* Free any memory that may be occupied by the route list. */
-	list_for_each_safe(ptr, storage, &priv->nfp_neigh_off_list) {
+	list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
 		route_entry = list_entry(ptr, struct nfp_ipv4_route_entry,
 					 list);
 		list_del(&route_entry->list);
 		kfree(route_entry);
 	}
+
+	/* Destroy rhash. Entries should be cleaned on netdev notifier unreg. */
+	rhashtable_free_and_destroy(&priv->tun.offloaded_macs,
+				    nfp_check_rhashtable_empty, NULL);
 }