mirror of https://gitee.com/openkylin/linux.git
Merge branch 'bpf-xdp-redirect'
Björn Töpel says:

====================
This two-patch series contains two optimizations for the bpf_redirect_map()
helper and the xdp_do_redirect() function.

The bpf_redirect_map() optimization is about avoiding the map lookup
dispatching. Instead of having a switch-statement and selecting the correct
lookup function, we let bpf_redirect_map() be a map operation, where each map
has its own bpf_redirect_map() implementation. This way the run-time lookup
is avoided.

The xdp_do_redirect() patch restructures the code, so that the map pointer
indirection can be avoided.

Performance-wise I got 4% improvement for XSKMAP (sample:xdpsock/rx-drop),
and 8% (sample:xdp_redirect_map) on my machine.

v5->v6: Removed REDIR enum, and instead use map_id and map_type. (Daniel)
        Applied Daniel's fixups on patch 1. (Daniel)
v4->v5: Renamed map operation to map_redirect. (Daniel)
v3->v4: Made bpf_redirect_map() a map operation. (Daniel)
v2->v3: Fix build when CONFIG_NET is not set. (lkp)
v1->v2: Removed warning when CONFIG_BPF_SYSCALL was not set. (lkp)
        Cleaned up case-clause in xdp_do_generic_redirect_map(). (Toke)
        Re-added comment. (Toke)
rfc->v1: Use map_id, and remove bpf_clear_redirect_map(). (Toke)
         Get rid of the macro and use __always_inline. (Jesper)
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
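The gist of the first patch can be sketched as a standalone userspace C program. The type and function names below are illustrative stand-ins, not the kernel API: rather than bpf_redirect_map() switching on the map type for every packet to pick a lookup function, each map type supplies its own redirect implementation through its ops table, and the helper reduces to a single indirect call, which the verifier then patches into a direct call at program load time.

#include <stdio.h>

/* Illustrative stand-ins for the kernel types; names are invented. */
struct map;

struct map_ops {
        /* One redirect implementation per map type replaces the old
         * run-time switch on the map type.
         */
        int (*map_redirect)(struct map *map, unsigned int ifindex,
                            unsigned long long flags);
};

struct map {
        const struct map_ops *ops;
};

static int devmap_redirect(struct map *map, unsigned int ifindex,
                           unsigned long long flags)
{
        (void)map; (void)flags;
        printf("devmap: redirect to index %u\n", ifindex);
        return 0;
}

static const struct map_ops devmap_ops = {
        .map_redirect = devmap_redirect,
};

/* The helper no longer inspects the map type at all. In the kernel, the
 * verifier additionally rewrites this indirect call into a direct call
 * to the concrete implementation when the program is loaded.
 */
static int bpf_redirect_map(struct map *map, unsigned int ifindex,
                            unsigned long long flags)
{
        return map->ops->map_redirect(map, ifindex, flags);
}

int main(void)
{
        struct map m = { .ops = &devmap_ops };
        return bpf_redirect_map(&m, 7, 0);
}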
commit 32f91529e2
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -118,6 +118,9 @@ struct bpf_map_ops {
 					   void *owner, u32 size);
 	struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner);
 
+	/* Misc helpers.*/
+	int (*map_redirect)(struct bpf_map *map, u32 ifindex, u64 flags);
+
 	/* map_meta_equal must be implemented for maps that can be
 	 * used as an inner map. It is a runtime check to ensure
 	 * an inner map can be inserted to an outer map.
@@ -1450,9 +1453,9 @@ struct btf *bpf_get_btf_vmlinux(void);
 /* Map specifics */
 struct xdp_buff;
 struct sk_buff;
+struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
 
-struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
-struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
 void __dev_flush(void);
 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
@@ -1462,7 +1465,6 @@ int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
 			     struct bpf_prog *xdp_prog);
 bool dev_map_can_have_prog(struct bpf_map *map);
 
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
 void __cpu_map_flush(void);
 int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
 		    struct net_device *dev_rx);
@@ -1593,17 +1595,6 @@ static inline int bpf_obj_get_user(const char __user *pathname, int flags)
 	return -EOPNOTSUPP;
 }
 
-static inline struct net_device *__dev_map_lookup_elem(struct bpf_map *map,
-						       u32 key)
-{
-	return NULL;
-}
-
-static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map,
-							     u32 key)
-{
-	return NULL;
-}
 static inline bool dev_map_can_have_prog(struct bpf_map *map)
 {
 	return false;
@@ -1615,6 +1606,7 @@ static inline void __dev_flush(void)
 
 struct xdp_buff;
 struct bpf_dtab_netdev;
+struct bpf_cpu_map_entry;
 
 static inline
 int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
@@ -1639,12 +1631,6 @@ static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
 	return 0;
 }
 
-static inline
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
-{
-	return NULL;
-}
-
 static inline void __cpu_map_flush(void)
 {
 }
diff --git a/include/linux/filter.h b/include/linux/filter.h
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -646,7 +646,8 @@ struct bpf_redirect_info {
 	u32 flags;
 	u32 tgt_index;
 	void *tgt_value;
-	struct bpf_map *map;
+	u32 map_id;
+	enum bpf_map_type map_type;
 	u32 kern_flags;
 	struct bpf_nh_params nh;
 };
@@ -1472,4 +1473,32 @@ static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol,
 }
 #endif /* IS_ENABLED(CONFIG_IPV6) */
 
+static __always_inline int __bpf_xdp_redirect_map(struct bpf_map *map, u32 ifindex, u64 flags,
+						  void *lookup_elem(struct bpf_map *map, u32 key))
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+	/* Lower bits of the flags are used as return code on lookup failure */
+	if (unlikely(flags > XDP_TX))
+		return XDP_ABORTED;
+
+	ri->tgt_value = lookup_elem(map, ifindex);
+	if (unlikely(!ri->tgt_value)) {
+		/* If the lookup fails we want to clear out the state in the
+		 * redirect_info struct completely, so that if an eBPF program
+		 * performs multiple lookups, the last one always takes
+		 * precedence.
+		 */
+		ri->map_id = INT_MAX; /* Valid map id idr range: [1,INT_MAX[ */
+		ri->map_type = BPF_MAP_TYPE_UNSPEC;
+		return flags;
+	}
+
+	ri->tgt_index = ifindex;
+	ri->map_id = map->id;
+	ri->map_type = map->map_type;
+
+	return XDP_REDIRECT;
+}
+
 #endif /* __LINUX_FILTER_H__ */
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -80,19 +80,6 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
 void __xsk_map_flush(void);
 
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
-						     u32 key)
-{
-	struct xsk_map *m = container_of(map, struct xsk_map, map);
-	struct xdp_sock *xs;
-
-	if (key >= map->max_entries)
-		return NULL;
-
-	xs = READ_ONCE(m->xsk_map[key]);
-	return xs;
-}
-
 #else
 
 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
@@ -109,12 +96,6 @@ static inline void __xsk_map_flush(void)
 {
 }
 
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
-						     u32 key)
-{
-	return NULL;
-}
-
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -86,19 +86,15 @@ struct _bpf_dtab_netdev {
 };
 #endif /* __DEVMAP_OBJ_TYPE */
 
-#define devmap_ifindex(tgt, map)				\
-	(((map->map_type == BPF_MAP_TYPE_DEVMAP ||		\
-	   map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ?	\
-	 ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
-
 DECLARE_EVENT_CLASS(xdp_redirect_template,
 
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
 
-	TP_ARGS(dev, xdp, tgt, err, map, index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index),
 
 	TP_STRUCT__entry(
 		__field(int, prog_id)
@@ -111,14 +107,22 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
 	),
 
 	TP_fast_assign(
+		u32 ifindex = 0, map_index = index;
+
+		if (map_type == BPF_MAP_TYPE_DEVMAP || map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
+			ifindex = ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex;
+		} else if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+			ifindex = index;
+			map_index = 0;
+		}
+
 		__entry->prog_id	= xdp->aux->id;
 		__entry->act		= XDP_REDIRECT;
 		__entry->ifindex	= dev->ifindex;
 		__entry->err		= err;
-		__entry->to_ifindex	= map ? devmap_ifindex(tgt, map) :
-						index;
-		__entry->map_id		= map ? map->id : 0;
-		__entry->map_index	= map ? index : 0;
+		__entry->to_ifindex	= ifindex;
+		__entry->map_id		= map_id;
+		__entry->map_index	= map_index;
 	),
 
 	TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
@@ -133,45 +137,49 @@ DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 #define _trace_xdp_redirect(dev, xdp, to)				\
-	 trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to)
+	 trace_xdp_redirect(dev, xdp, NULL, 0, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
 
 #define _trace_xdp_redirect_err(dev, xdp, to, err)			\
-	 trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to)
+	 trace_xdp_redirect_err(dev, xdp, NULL, err, BPF_MAP_TYPE_UNSPEC, INT_MAX, to)
 
-#define _trace_xdp_redirect_map(dev, xdp, to, map, index)		\
-	 trace_xdp_redirect(dev, xdp, to, 0, map, index)
+#define _trace_xdp_redirect_map(dev, xdp, to, map_type, map_id, index)	\
+	 trace_xdp_redirect(dev, xdp, to, 0, map_type, map_id, index)
 
-#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err)	\
-	 trace_xdp_redirect_err(dev, xdp, to, err, map, index)
+#define _trace_xdp_redirect_map_err(dev, xdp, to, map_type, map_id, index, err) \
+	 trace_xdp_redirect_err(dev, xdp, to, err, map_type, map_id, index)
 
 /* not used anymore, but kept around so as not to break old programs */
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
 );
 
 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
 	TP_PROTO(const struct net_device *dev,
 		 const struct bpf_prog *xdp,
 		 const void *tgt, int err,
-		 const struct bpf_map *map, u32 index),
-	TP_ARGS(dev, xdp, tgt, err, map, index)
+		 enum bpf_map_type map_type,
+		 u32 map_id, u32 index),
+	TP_ARGS(dev, xdp, tgt, err, map_type, map_id, index)
);
 
 TRACE_EVENT(xdp_cpumap_kthread,
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -543,7 +543,6 @@ static void cpu_map_free(struct bpf_map *map)
 	 * complete.
 	 */
 
-	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* For cpu_map the remote CPUs can still be using the entries
@@ -563,7 +562,7 @@ static void cpu_map_free(struct bpf_map *map)
 	kfree(cmap);
 }
 
-struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
+static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
 	struct bpf_cpu_map_entry *rcpu;
@@ -600,6 +599,11 @@ static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
+static int cpu_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+{
+	return __bpf_xdp_redirect_map(map, ifindex, flags, __cpu_map_lookup_elem);
+}
+
 static int cpu_map_btf_id;
 const struct bpf_map_ops cpu_map_ops = {
 	.map_meta_equal		= bpf_map_meta_equal,
@@ -612,6 +616,7 @@ const struct bpf_map_ops cpu_map_ops = {
 	.map_check_btf		= map_check_no_btf,
 	.map_btf_name		= "bpf_cpu_map",
 	.map_btf_id		= &cpu_map_btf_id,
+	.map_redirect		= cpu_map_redirect,
 };
 
 static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -197,7 +197,6 @@ static void dev_map_free(struct bpf_map *map)
 	list_del_rcu(&dtab->list);
 	spin_unlock(&dev_map_lock);
 
-	bpf_clear_redirect_map(map);
 	synchronize_rcu();
 
 	/* Make sure prior __dev_map_entry_free() have completed. */
@@ -258,7 +257,7 @@ static int dev_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
 	return 0;
 }
 
-struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
+static void *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	struct hlist_head *head = dev_map_index_hash(dtab, key);
@@ -392,7 +391,7 @@ void __dev_flush(void)
  * update happens in parallel here a dev_put wont happen until after reading the
  * ifindex.
  */
-struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
+static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key)
 {
 	struct bpf_dtab *dtab = container_of(map, struct bpf_dtab, map);
 	struct bpf_dtab_netdev *obj;
@@ -735,6 +734,16 @@ static int dev_map_hash_update_elem(struct bpf_map *map, void *key, void *value,
 					 map, key, value, map_flags);
 }
 
+static int dev_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+{
+	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_lookup_elem);
+}
+
+static int dev_hash_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+{
+	return __bpf_xdp_redirect_map(map, ifindex, flags, __dev_map_hash_lookup_elem);
+}
+
 static int dev_map_btf_id;
 const struct bpf_map_ops dev_map_ops = {
 	.map_meta_equal = bpf_map_meta_equal,
@@ -747,6 +756,7 @@ const struct bpf_map_ops dev_map_ops = {
 	.map_check_btf = map_check_no_btf,
 	.map_btf_name = "bpf_dtab",
 	.map_btf_id = &dev_map_btf_id,
+	.map_redirect = dev_map_redirect,
 };
 
 static int dev_map_hash_map_btf_id;
@@ -761,6 +771,7 @@ const struct bpf_map_ops dev_map_hash_ops = {
 	.map_check_btf = map_check_no_btf,
 	.map_btf_name = "bpf_dtab",
 	.map_btf_id = &dev_map_hash_map_btf_id,
+	.map_redirect = dev_hash_map_redirect,
 };
 
 static void dev_map_hash_remove_netdev(struct bpf_dtab *dtab,
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5582,7 +5582,8 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
 	    func_id != BPF_FUNC_map_push_elem &&
 	    func_id != BPF_FUNC_map_pop_elem &&
 	    func_id != BPF_FUNC_map_peek_elem &&
-	    func_id != BPF_FUNC_for_each_map_elem)
+	    func_id != BPF_FUNC_for_each_map_elem &&
+	    func_id != BPF_FUNC_redirect_map)
 		return 0;
 
 	if (map == NULL) {
@@ -12017,7 +12018,8 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 		     insn->imm == BPF_FUNC_map_delete_elem ||
 		     insn->imm == BPF_FUNC_map_push_elem ||
 		     insn->imm == BPF_FUNC_map_pop_elem ||
-		     insn->imm == BPF_FUNC_map_peek_elem)) {
+		     insn->imm == BPF_FUNC_map_peek_elem ||
+		     insn->imm == BPF_FUNC_redirect_map)) {
 			aux = &env->insn_aux_data[i + delta];
 			if (bpf_map_ptr_poisoned(aux))
 				goto patch_call_imm;
@@ -12059,6 +12061,9 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 				     (int (*)(struct bpf_map *map, void *value))NULL));
 			BUILD_BUG_ON(!__same_type(ops->map_peek_elem,
 				     (int (*)(struct bpf_map *map, void *value))NULL));
+			BUILD_BUG_ON(!__same_type(ops->map_redirect,
+				     (int (*)(struct bpf_map *map, u32 ifindex, u64 flags))NULL));
+
 patch_map_ops_generic:
 			switch (insn->imm) {
 			case BPF_FUNC_map_lookup_elem:
@@ -12085,6 +12090,10 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 				insn->imm = BPF_CAST_CALL(ops->map_peek_elem) -
 					    __bpf_call_base;
 				continue;
+			case BPF_FUNC_redirect_map:
+				insn->imm = BPF_CAST_CALL(ops->map_redirect) -
+					    __bpf_call_base;
+				continue;
 			}
 
 			goto patch_call_imm;
diff --git a/net/core/filter.c b/net/core/filter.c
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3918,23 +3918,6 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
-static int __bpf_tx_xdp_map(struct net_device *dev_rx, void *fwd,
-			    struct bpf_map *map, struct xdp_buff *xdp)
-{
-	switch (map->map_type) {
-	case BPF_MAP_TYPE_DEVMAP:
-	case BPF_MAP_TYPE_DEVMAP_HASH:
-		return dev_map_enqueue(fwd, xdp, dev_rx);
-	case BPF_MAP_TYPE_CPUMAP:
-		return cpu_map_enqueue(fwd, xdp, dev_rx);
-	case BPF_MAP_TYPE_XSKMAP:
-		return __xsk_map_redirect(fwd, xdp);
-	default:
-		return -EBADRQC;
-	}
-	return 0;
-}
-
 void xdp_do_flush(void)
 {
 	__dev_flush();
@@ -3943,71 +3926,52 @@ void xdp_do_flush(void)
 }
 EXPORT_SYMBOL_GPL(xdp_do_flush);
 
-static inline void *__xdp_map_lookup_elem(struct bpf_map *map, u32 index)
-{
-	switch (map->map_type) {
-	case BPF_MAP_TYPE_DEVMAP:
-		return __dev_map_lookup_elem(map, index);
-	case BPF_MAP_TYPE_DEVMAP_HASH:
-		return __dev_map_hash_lookup_elem(map, index);
-	case BPF_MAP_TYPE_CPUMAP:
-		return __cpu_map_lookup_elem(map, index);
-	case BPF_MAP_TYPE_XSKMAP:
-		return __xsk_map_lookup_elem(map, index);
-	default:
-		return NULL;
-	}
-}
-
-void bpf_clear_redirect_map(struct bpf_map *map)
-{
-	struct bpf_redirect_info *ri;
-	int cpu;
-
-	for_each_possible_cpu(cpu) {
-		ri = per_cpu_ptr(&bpf_redirect_info, cpu);
-		/* Avoid polluting remote cacheline due to writes if
-		 * not needed. Once we pass this test, we need the
-		 * cmpxchg() to make sure it hasn't been changed in
-		 * the meantime by remote CPU.
-		 */
-		if (unlikely(READ_ONCE(ri->map) == map))
-			cmpxchg(&ri->map, map, NULL);
-	}
-}
-
 int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp,
 		    struct bpf_prog *xdp_prog)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	struct bpf_map *map = READ_ONCE(ri->map);
-	u32 index = ri->tgt_index;
+	enum bpf_map_type map_type = ri->map_type;
 	void *fwd = ri->tgt_value;
+	u32 map_id = ri->map_id;
 	int err;
 
-	ri->tgt_index = 0;
-	ri->tgt_value = NULL;
-	WRITE_ONCE(ri->map, NULL);
+	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+	ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
-	if (unlikely(!map)) {
-		fwd = dev_get_by_index_rcu(dev_net(dev), index);
-		if (unlikely(!fwd)) {
-			err = -EINVAL;
-			goto err;
+	switch (map_type) {
+	case BPF_MAP_TYPE_DEVMAP:
+		fallthrough;
+	case BPF_MAP_TYPE_DEVMAP_HASH:
+		err = dev_map_enqueue(fwd, xdp, dev);
+		break;
+	case BPF_MAP_TYPE_CPUMAP:
+		err = cpu_map_enqueue(fwd, xdp, dev);
+		break;
+	case BPF_MAP_TYPE_XSKMAP:
+		err = __xsk_map_redirect(fwd, xdp);
+		break;
+	case BPF_MAP_TYPE_UNSPEC:
+		if (map_id == INT_MAX) {
+			fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
+			if (unlikely(!fwd)) {
+				err = -EINVAL;
+				break;
+			}
+			err = dev_xdp_enqueue(fwd, xdp, dev);
+			break;
 		}
-
-		err = dev_xdp_enqueue(fwd, xdp, dev);
-	} else {
-		err = __bpf_tx_xdp_map(dev, fwd, map, xdp);
+		fallthrough;
+	default:
+		err = -EBADRQC;
 	}
 
 	if (unlikely(err))
 		goto err;
 
-	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
+	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
 	return 0;
 err:
-	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
+	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
 	return err;
 }
 EXPORT_SYMBOL_GPL(xdp_do_redirect);
@@ -4016,41 +3980,36 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
 				       struct sk_buff *skb,
 				       struct xdp_buff *xdp,
 				       struct bpf_prog *xdp_prog,
-				       struct bpf_map *map)
+				       void *fwd,
+				       enum bpf_map_type map_type, u32 map_id)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	u32 index = ri->tgt_index;
-	void *fwd = ri->tgt_value;
-	int err = 0;
-
-	ri->tgt_index = 0;
-	ri->tgt_value = NULL;
-	WRITE_ONCE(ri->map, NULL);
-
-	if (map->map_type == BPF_MAP_TYPE_DEVMAP ||
-	    map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) {
-		struct bpf_dtab_netdev *dst = fwd;
-
-		err = dev_map_generic_redirect(dst, skb, xdp_prog);
+	int err;
+
+	switch (map_type) {
+	case BPF_MAP_TYPE_DEVMAP:
+		fallthrough;
+	case BPF_MAP_TYPE_DEVMAP_HASH:
+		err = dev_map_generic_redirect(fwd, skb, xdp_prog);
 		if (unlikely(err))
 			goto err;
-	} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
-		struct xdp_sock *xs = fwd;
-
-		err = xsk_generic_rcv(xs, xdp);
+		break;
+	case BPF_MAP_TYPE_XSKMAP:
+		err = xsk_generic_rcv(fwd, xdp);
 		if (err)
 			goto err;
 		consume_skb(skb);
-	} else {
+		break;
+	default:
 		/* TODO: Handle BPF_MAP_TYPE_CPUMAP */
 		err = -EBADRQC;
 		goto err;
 	}
 
-	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map, index);
+	_trace_xdp_redirect_map(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index);
 	return 0;
 err:
-	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map, index, err);
+	_trace_xdp_redirect_map_err(dev, xdp_prog, fwd, map_type, map_id, ri->tgt_index, err);
 	return err;
 }
 
@@ -4058,16 +4017,16 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 			    struct xdp_buff *xdp, struct bpf_prog *xdp_prog)
 {
 	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-	struct bpf_map *map = READ_ONCE(ri->map);
-	u32 index = ri->tgt_index;
-	struct net_device *fwd;
-	int err = 0;
-
-	if (map)
-		return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog,
-						   map);
-	ri->tgt_index = 0;
-	fwd = dev_get_by_index_rcu(dev_net(dev), index);
-	if (unlikely(!fwd)) {
-		err = -EINVAL;
-		goto err;
+	enum bpf_map_type map_type = ri->map_type;
+	void *fwd = ri->tgt_value;
+	u32 map_id = ri->map_id;
+	int err;
+
+	ri->map_id = 0; /* Valid map id idr range: [1,INT_MAX[ */
+	ri->map_type = BPF_MAP_TYPE_UNSPEC;
+
+	if (map_type == BPF_MAP_TYPE_UNSPEC && map_id == INT_MAX) {
+		fwd = dev_get_by_index_rcu(dev_net(dev), ri->tgt_index);
+		if (unlikely(!fwd)) {
+			err = -EINVAL;
+			goto err;
@@ -4078,11 +4037,14 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
 			goto err;
+		}
 
 		skb->dev = fwd;
-	_trace_xdp_redirect(dev, xdp_prog, index);
+		_trace_xdp_redirect(dev, xdp_prog, ri->tgt_index);
 		generic_xdp_tx(skb, xdp_prog);
 		return 0;
+	}
 
+	return xdp_do_generic_redirect_map(dev, skb, xdp, xdp_prog, fwd, map_type, map_id);
 err:
-	_trace_xdp_redirect_err(dev, xdp_prog, index, err);
+	_trace_xdp_redirect_err(dev, xdp_prog, ri->tgt_index, err);
 	return err;
 }
 
@@ -4093,10 +4055,12 @@ BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags)
 	if (unlikely(flags))
 		return XDP_ABORTED;
 
-	ri->flags = flags;
+	/* NB! Map type UNSPEC and map_id == INT_MAX (never generated
+	 * by map_idr) is used for ifindex based XDP redirect.
+	 */
 	ri->tgt_index = ifindex;
-	ri->tgt_value = NULL;
-	WRITE_ONCE(ri->map, NULL);
+	ri->map_id = INT_MAX;
+	ri->map_type = BPF_MAP_TYPE_UNSPEC;
 
 	return XDP_REDIRECT;
 }
@@ -4112,28 +4076,7 @@ static const struct bpf_func_proto bpf_xdp_redirect_proto = {
 BPF_CALL_3(bpf_xdp_redirect_map, struct bpf_map *, map, u32, ifindex,
 	   u64, flags)
 {
-	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
-
-	/* Lower bits of the flags are used as return code on lookup failure */
-	if (unlikely(flags > XDP_TX))
-		return XDP_ABORTED;
-
-	ri->tgt_value = __xdp_map_lookup_elem(map, ifindex);
-	if (unlikely(!ri->tgt_value)) {
-		/* If the lookup fails we want to clear out the state in the
-		 * redirect_info struct completely, so that if an eBPF program
-		 * performs multiple lookups, the last one always takes
-		 * precedence.
-		 */
-		WRITE_ONCE(ri->map, NULL);
-		return flags;
-	}
-
-	ri->flags = flags;
-	ri->tgt_index = ifindex;
-	WRITE_ONCE(ri->map, map);
-
-	return XDP_REDIRECT;
+	return map->ops->map_redirect(map, ifindex, flags);
 }
 
 static const struct bpf_func_proto bpf_xdp_redirect_map_proto = {
diff --git a/net/xdp/xskmap.c b/net/xdp/xskmap.c
--- a/net/xdp/xskmap.c
+++ b/net/xdp/xskmap.c
@@ -87,7 +87,6 @@ static void xsk_map_free(struct bpf_map *map)
 {
 	struct xsk_map *m = container_of(map, struct xsk_map, map);
 
-	bpf_clear_redirect_map(map);
 	synchronize_net();
 	bpf_map_area_free(m);
 }
@@ -125,6 +124,16 @@ static int xsk_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
 	return insn - insn_buf;
 }
 
+static void *__xsk_map_lookup_elem(struct bpf_map *map, u32 key)
+{
+	struct xsk_map *m = container_of(map, struct xsk_map, map);
+
+	if (key >= map->max_entries)
+		return NULL;
+
+	return READ_ONCE(m->xsk_map[key]);
+}
+
 static void *xsk_map_lookup_elem(struct bpf_map *map, void *key)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
@@ -215,6 +224,11 @@ static int xsk_map_delete_elem(struct bpf_map *map, void *key)
 	return 0;
 }
 
+static int xsk_map_redirect(struct bpf_map *map, u32 ifindex, u64 flags)
+{
+	return __bpf_xdp_redirect_map(map, ifindex, flags, __xsk_map_lookup_elem);
+}
+
 void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
 			     struct xdp_sock **map_entry)
 {
@@ -247,4 +261,5 @@ const struct bpf_map_ops xsk_map_ops = {
 	.map_check_btf = map_check_no_btf,
 	.map_btf_name = "xsk_map",
 	.map_btf_id = &xsk_map_btf_id,
+	.map_redirect = xsk_map_redirect,
 };