bpf: Replace open coded recursion prevention in sys_bpf()

The required protection is that the caller cannot be migrated to a
different CPU as these functions end up in places which take either a hash
bucket lock or might trigger a kprobe inside the memory allocator. Both
scenarios can lead to deadlocks. The deadlock prevention is per CPU by
incrementing a per CPU variable which temporarily blocks the invocation of
BPF programs from perf and kprobes.

Replace the open coded preempt_[dis|en]able and __this_cpu_[inc|dec] pairs
with the new helper functions. These functions are already prepared to make
BPF work on PREEMPT_RT enabled kernels. No functional change for !RT
kernels.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145644.317843926@linutronix.de
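
For reference, the bpf_disable_instrumentation()/bpf_enable_instrumentation()
pair lives in include/linux/bpf.h and was introduced earlier in this series.
A condensed sketch of the helpers' shape (consult the tree for the exact
definitions; this is not part of the diff below):

	/* Guard against recursion into BPF from instrumentation on this CPU.
	 * migrate_disable() keeps the task on this CPU; on !RT it maps to
	 * preempt_disable(), so behaviour is unchanged for !RT kernels.
	 */
	static inline void bpf_disable_instrumentation(void)
	{
		migrate_disable();
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			this_cpu_inc(bpf_prog_active);
		else
			__this_cpu_inc(bpf_prog_active);
	}

	static inline void bpf_enable_instrumentation(void)
	{
		if (IS_ENABLED(CONFIG_PREEMPT_RT))
			this_cpu_dec(bpf_prog_active);
		else
			__this_cpu_dec(bpf_prog_active);
		migrate_enable();
	}

On RT, preemption stays enabled inside the guarded section, so the helpers
use the safe this_cpu_inc()/this_cpu_dec() variants instead of the
__this_cpu_*() ones, which assume preemption is already disabled.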

--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -171,11 +171,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
 						   flags);
 	}
 
-	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
-	 * inside bpf map update or delete otherwise deadlocks are possible
-	 */
-	preempt_disable();
-	__this_cpu_inc(bpf_prog_active);
+	bpf_disable_instrumentation();
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_update(map, key, value, flags);
@@ -206,8 +202,7 @@ static int bpf_map_update_value(struct bpf_map *map, struct fd f, void *key,
 		err = map->ops->map_update_elem(map, key, value, flags);
 		rcu_read_unlock();
 	}
-	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	bpf_enable_instrumentation();
 	maybe_wait_bpf_programs(map);
 
 	return err;
@@ -222,8 +217,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 	if (bpf_map_is_dev_bound(map))
 		return bpf_map_offload_lookup_elem(map, key, value);
 
-	preempt_disable();
-	this_cpu_inc(bpf_prog_active);
+	bpf_disable_instrumentation();
 	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
 	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
 		err = bpf_percpu_hash_copy(map, key, value);
@@ -268,8 +262,7 @@ static int bpf_map_copy_value(struct bpf_map *map, void *key, void *value,
 		rcu_read_unlock();
 	}
 
-	this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	bpf_enable_instrumentation();
 	maybe_wait_bpf_programs(map);
 
 	return err;
@@ -1136,13 +1129,11 @@ static int map_delete_elem(union bpf_attr *attr)
 		goto out;
 	}
 
-	preempt_disable();
-	__this_cpu_inc(bpf_prog_active);
+	bpf_disable_instrumentation();
 	rcu_read_lock();
 	err = map->ops->map_delete_elem(map, key);
 	rcu_read_unlock();
-	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
+	bpf_enable_instrumentation();
 	maybe_wait_bpf_programs(map);
 out:
 	kfree(key);
@@ -1254,13 +1245,11 @@ int generic_map_delete_batch(struct bpf_map *map,
 			break;
 		}
 
-		preempt_disable();
-		__this_cpu_inc(bpf_prog_active);
+		bpf_disable_instrumentation();
 		rcu_read_lock();
 		err = map->ops->map_delete_elem(map, key);
 		rcu_read_unlock();
-		__this_cpu_dec(bpf_prog_active);
-		preempt_enable();
+		bpf_enable_instrumentation();
 		maybe_wait_bpf_programs(map);
 		if (err)
 			break;
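
Why incrementing bpf_prog_active blocks program invocation: the perf and
kprobe entry paths bail out when the per-CPU counter is already non-zero.
A condensed, abridged sketch of trace_call_bpf() in kernel/trace/bpf_trace.c
from this era (error handling trimmed):

	unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
	{
		unsigned int ret;

		if (in_nmi()) /* not supported yet */
			return 1;

		preempt_disable();
		if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
			/* A BPF program already runs on this CPU, or the
			 * syscall path bumped the counter via
			 * bpf_disable_instrumentation(): don't recurse,
			 * don't run the programs, tell the caller to skip
			 * the event.
			 */
			ret = 0;
			goto out;
		}

		ret = BPF_PROG_RUN_ARRAY_CHECK(call->prog_array, ctx, BPF_PROG_RUN);

	out:
		__this_cpu_dec(bpf_prog_active);
		preempt_enable();

		return ret;
	}

A kprobe firing while sys_bpf() holds a hash bucket lock therefore skips its
BPF programs instead of deadlocking on the same bucket.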