/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>

static LIST_HEAD(bpf_map_types);

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}

/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
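
/* Map implementations register themselves at boot, typically from an
 * initcall; a sketch of the pattern (names illustrative, modeled on the
 * hash map implementation):
 *
 *	static const struct bpf_map_ops htab_ops = { ... };
 *
 *	static struct bpf_map_type_list htab_type __read_mostly = {
 *		.ops = &htab_ops,
 *		.type = BPF_MAP_TYPE_HASH,
 *	};
 *
 *	static int __init register_htab_map(void)
 *	{
 *		bpf_register_map_type(&htab_type);
 *		return 0;
 *	}
 *	late_initcall(register_htab_map);
 */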

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	/* implementation dependent freeing */
	map->ops->map_free(map);
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
		/* prog_array stores refcnt-ed bpf_prog pointers
		 * release them all when user space closes prog_array_fd
		 */
		bpf_prog_array_map_clear(map);

	bpf_map_put(map);
	return 0;
}

static const struct file_operations bpf_map_fops = {
	.release = bpf_map_release,
};

/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
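
/* Example: CHECK_ATTR(BPF_MAP_CREATE) expands with CMD##_LAST_FIELD set to
 * map_create's last known field 'max_entries', so memchr_inv() scans every
 * byte of *attr past that field and the macro evaluates to true if any of
 * them is non-zero.  memchr_inv() returns the first byte that differs from
 * the given pattern (here 0), or NULL when the whole trailing region
 * matches, which is how each command rejects attr layouts newer than it
 * understands.
 */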

#define BPF_MAP_CREATE_LAST_FIELD max_entries
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);

	err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	map->ops->map_free(map);
	return err;
}
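
/* User-space view of BPF_MAP_CREATE, per the syscall contract: the command
 * returns a new map fd or a negative error, and close(fd) later deletes the
 * map.  A minimal sketch assuming a raw syscall(2) wrapper (the helper name
 * bpf_create_map() is illustrative, not part of any ABI):
 *
 *	int bpf_create_map(enum bpf_map_type map_type, int key_size,
 *			   int value_size, int max_entries)
 *	{
 *		union bpf_attr attr = {
 *			.map_type = map_type,
 *			.key_size = key_size,
 *			.value_size = value_size,
 *			.max_entries = max_entries,
 *		};
 *
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 */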

/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *bpf_map_get(struct fd f)
{
	struct bpf_map *map;

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	map = f.file->private_data;

	return map;
}

/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}
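
/* User space fills these __aligned_u64 fields the other way around; the
 * ptr_to_u64() used in the examples below is the conventional counterpart,
 * presumably:
 *
 *	__u64 ptr_to_u64(void *ptr)
 *	{
 *		return (__u64) (unsigned long) ptr;
 *	}
 */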

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *value, *ptr;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER);
	if (!value)
		goto free_key;

	rcu_read_lock();
	ptr = map->ops->map_lookup_elem(map, key);
	if (ptr)
		memcpy(value, ptr, map->value_size);
	rcu_read_unlock();

	err = -ENOENT;
	if (!ptr)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, map->value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
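
/* Matching user-space call for BPF_MAP_LOOKUP_ELEM: returns zero and copies
 * the found element into *value, or a negative error (-ENOENT when the key
 * is absent).  A sketch with an illustrative wrapper name:
 *
 *	int bpf_lookup_elem(int fd, void *key, void *value)
 *	{
 *		union bpf_attr attr = {
 *			.map_fd = fd,
 *			.key = ptr_to_u64(key),
 *			.value = ptr_to_u64(value),
 *		};
 *
 *		return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	}
 */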
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *value;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	value = kmalloc(map->value_size, GFP_USER);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, map->value_size) != 0)
		goto free_value;

	/* eBPF programs that use maps run under rcu_read_lock(),
	 * therefore all map accessors rely on this fact, so do the same here
	 */
	rcu_read_lock();
	err = map->ops->map_update_elem(map, key, value, attr->flags);
	rcu_read_unlock();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
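
/* The low two bits of attr->flags select the update style; bits 2-63 are
 * reserved for future use:
 *
 *	BPF_ANY      0  create a new element or update an existing one
 *	BPF_NOEXIST  1  create only; fails with -EEXIST if the key exists
 *	BPF_EXIST    2  update only; fails with -ENOENT if the key is absent
 *
 * Per the command's introduction, user space calls it as:
 *
 *	int bpf_update_elem(int fd, void *key, void *value, __u64 flags)
 *	{
 *		union bpf_attr attr = {
 *			.map_fd = fd,
 *			.key = ptr_to_u64(key),
 *			.value = ptr_to_u64(value),
 *			.flags = flags,
 *		};
 *
 *		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
 *	}
 */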

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct fd f = fdget(ufd);
	struct bpf_map *map;
	void *key, *next_key;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	map = bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
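
/* BPF_MAP_GET_NEXT_KEY is the iteration primitive: given attr->key it
 * returns the following key in attr->next_key, or a negative error once no
 * next element exists.  A hedged sketch of a full walk from user space,
 * with illustrative wrapper names, KEY_SIZE standing in for map->key_size,
 * and the starting key chosen as one not present in the map (which the
 * hash map implementation of this era answers with the first element):
 *
 *	char key[KEY_SIZE] = {}, next_key[KEY_SIZE];
 *
 *	while (bpf_get_next_key(map_fd, key, next_key) == 0) {
 *		bpf_lookup_elem(map_fd, next_key, value);
 *		memcpy(key, next_key, KEY_SIZE);
 *	}
 */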

static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}

/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have prototype and verifier allowed
			 * programs to call them, must be real in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
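
/* Why bpf_tail_call(ctx, &jmp_table, index) is special-cased above: per its
 * introduction it is a tail call, not a normal helper call.  The
 * interpreter and JITs reuse the current stack frame and jump into the
 * callee program (the x64 JIT skips the shared prologue).  The jump table
 * is a BPF_MAP_TYPE_PROG_ARRAY that user space fills with program FDs; an
 * empty jmp_table[index] simply falls through, and tail_call_cnt caps a
 * chain at 32 so dynamic loops stay bounded.
 */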

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

static void __prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_free(aux->prog);
}

/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		prog->aux->prog = prog;
		call_rcu(&prog->aux->rcu, __prog_put_rcu);
	}
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		free_used_maps(prog->aux);
		bpf_prog_free(prog);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

static struct bpf_prog *get_prog(struct fd f)
{
	struct bpf_prog *prog;

	if (!f.file)
		return ERR_PTR(-EBADF);

	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	prog = f.file->private_data;

	return prog;
}

/* called by sockets/tracing/seccomp before attaching program to an event
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = get_prog(f);

	if (IS_ERR(prog))
		return prog;

	atomic_inc(&prog->aux->refcnt);
	fdput(f);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);
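
/* bpf_prog_get() and bpf_prog_put() are exported so that modular users such
 * as cls_bpf can hold a loaded program beyond the lifetime of the fd: look
 * the fd up once at attach time, keep the reference, and drop it when the
 * tc filter is destroyed.
 */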

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = false;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	err = bpf_prog_select_runtime(prog);
	if (err < 0)
		goto free_used_maps;

	err = anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog, O_RDWR | O_CLOEXEC);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_free(prog);
	return err;
}
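
/* The kern_version check above exists because kprobes are fundamentally not
 * a stable kernel ABI: BPF_PROG_TYPE_KPROBE programs must be recompiled for
 * every kernel and supply the matching LINUX_VERSION_CODE in
 * attr.kern_version.  Once loaded, the returned prog_fd is attached to a
 * kprobe through the perf syscall, roughly:
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_TRACEPOINT,
 *		.config = event_id,
 *	};
 *
 *	event_fd = perf_event_open(&attr, ...);
 *	ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
 *
 * and close(event_fd) detaches the program again.
 */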

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	/* the syscall is limited to root temporarily. This restriction will be
	 * lifted when security audit is clean. Note that eBPF+tracing must have
	 * this restriction, since it may pass kernel data to user space
	 */
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}