Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:

====================
pull-request: bpf-next 2020-05-01 (v2)

The following pull-request contains BPF updates for your *net-next* tree.

We've added 61 non-merge commits during the last 6 day(s) which contain
a total of 153 files changed, 6739 insertions(+), 3367 deletions(-).

The main changes are:

1) Pulled work.sysctl from the vfs tree with the sysctl BPF changes.

2) bpf_link observability, from Andrii.

3) BTF-defined map in map, from Andrii.

4) ASAN fixes for selftests, from Andrii.

5) Allow bpf_map_lookup_elem for SOCKMAP and SOCKHASH, from Jakub.

6) Production Cloudflare classifier as a selftest, from Lorenz.

7) bpf_ktime_get_*_ns() helper improvements, from Maciej.

8) Unprivileged bpftool feature probe, from Quentin.

9) BPF_ENABLE_STATS command, from Song.

10) Enable bpf_[gs]etsockopt() helpers for sock_ops progs, from Stanislav.

11) Enable a bunch of common helpers for cg-device, sysctl and sockopt
    progs, from Stanislav.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 115506fea4
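Item 9 adds a new bpf(2) command; the corresponding UAPI pieces (BPF_ENABLE_STATS, the enable_stats attribute and BPF_STATS_RUN_TIME) appear in the uapi hunks below. As a minimal userspace sketch of how the command is meant to be driven — the wrapper name is ours for illustration, not a libbpf API — BPF_ENABLE_STATS returns a new fd, and run_time_ns/run_cnt accounting stays enabled for as long as that fd is held open:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Hypothetical helper: enable BPF run-time stats system-wide.
 * Returns an fd on success; closing it disables the stats again. */
static int bpf_enable_stats_fd(void)
{
        union bpf_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.enable_stats.type = BPF_STATS_RUN_TIME;

        return syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
}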
@@ -203,7 +203,7 @@ static void __init register_insn_emulation(struct insn_emulation_ops *ops)
 }
 
 static int emulation_proc_handler(struct ctl_table *table, int write,
-                                  void __user *buffer, size_t *lenp,
+                                  void *buffer, size_t *lenp,
                                   loff_t *ppos)
 {
         int ret = 0;
@@ -341,8 +341,7 @@ static unsigned int find_supported_vector_length(unsigned int vl)
 #ifdef CONFIG_SYSCTL
 
 static int sve_proc_do_default_vl(struct ctl_table *table, int write,
-                                  void __user *buffer, size_t *lenp,
-                                  loff_t *ppos)
+                                  void *buffer, size_t *lenp, loff_t *ppos)
 {
         int ret;
         int vl = sve_default_vl;
@@ -95,16 +95,15 @@ int proc_lasat_ip(struct ctl_table *table, int write,
                 len = 0;
                 p = buffer;
                 while (len < *lenp) {
-                        if (get_user(c, p++))
-                                return -EFAULT;
+                        c = *p;
+                        p++;
                         if (c == 0 || c == '\n')
                                 break;
                         len++;
                 }
                 if (len >= sizeof(ipbuf)-1)
                         len = sizeof(ipbuf) - 1;
-                if (copy_from_user(ipbuf, buffer, len))
-                        return -EFAULT;
+                memcpy(ipbuf, buffer, len);
                 ipbuf[len] = 0;
                 *ppos += *lenp;
                 /* Now see if we can convert it to a valid IP */

@@ -122,11 +121,9 @@ int proc_lasat_ip(struct ctl_table *table, int write,
                 if (len > *lenp)
                         len = *lenp;
                 if (len)
-                        if (copy_to_user(buffer, ipbuf, len))
-                                return -EFAULT;
+                        memcpy(buffer, ipbuf, len);
                 if (len < *lenp) {
-                        if (put_user('\n', ((char *) buffer) + len))
-                                return -EFAULT;
+                        *((char *)buffer + len) = '\n';
                         len++;
                 }
                 *lenp = len;
@@ -13,8 +13,35 @@
 #include <linux/filter.h>
 #include "bpf_jit.h"
 
+/*
+ * Stack layout during BPF program execution:
+ *
+ *                     high
+ *     RV32 fp =>  +----------+
+ *                 | saved ra |
+ *                 | saved fp | RV32 callee-saved registers
+ *                 |   ...    |
+ *                 +----------+ <= (fp - 4 * NR_SAVED_REGISTERS)
+ *                 |  hi(R6)  |
+ *                 |  lo(R6)  |
+ *                 |  hi(R7)  | JIT scratch space for BPF registers
+ *                 |  lo(R7)  |
+ *                 |   ...    |
+ *  BPF_REG_FP =>  +----------+ <= (fp - 4 * NR_SAVED_REGISTERS
+ *                 |          |       - 4 * BPF_JIT_SCRATCH_REGS)
+ *                 |          |
+ *                 |   ...    | BPF program stack
+ *                 |          |
+ *     RV32 sp =>  +----------+
+ *                 |          |
+ *                 |   ...    | Function call stack
+ *                 |          |
+ *                 +----------+
+ *                     low
+ */
+
 enum {
-        /* Stack layout - these are offsets from (top of stack - 4). */
+        /* Stack layout - these are offsets from top of JIT scratch space. */
         BPF_R6_HI,
         BPF_R6_LO,
         BPF_R7_HI,

@@ -29,7 +56,11 @@ enum {
         BPF_JIT_SCRATCH_REGS,
 };
 
-#define STACK_OFFSET(k) (-4 - ((k) * 4))
+/* Number of callee-saved registers stored to stack: ra, fp, s1--s7. */
+#define NR_SAVED_REGISTERS      9
+
+/* Offset from fp for BPF registers stored on stack. */
+#define STACK_OFFSET(k) (-4 - (4 * NR_SAVED_REGISTERS) - (4 * (k)))
 
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
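A worked expansion of the new macro (ours, not part of the patch) makes the layout diagram concrete: the nine callee-saved words occupy fp-4 through fp-36, so the JIT scratch slots must begin immediately below them.

/* Illustration only: plugging the first scratch slots into the new macro. */
#define NR_SAVED_REGISTERS 9
#define STACK_OFFSET(k) (-4 - (4 * NR_SAVED_REGISTERS) - (4 * (k)))

/* STACK_OFFSET(0) = -40  -> hi(R6), first slot below the ra..s7 save area
 * STACK_OFFSET(1) = -44  -> lo(R6)
 * STACK_OFFSET(2) = -48  -> hi(R7)
 *
 * The old definition, (-4 - ((k) * 4)), placed slot 0 at fp - 4, on top of
 * the saved ra/fp words -- the overlap this layout fix appears to remove.
 */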
@@ -111,11 +142,9 @@ static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo,
 
 static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
 {
-        int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 4;
+        int stack_adjust = ctx->stack_size;
         const s8 *r0 = bpf2rv32[BPF_REG_0];
 
-        store_offset -= 4 * BPF_JIT_SCRATCH_REGS;
-
         /* Set return value if not tail call. */
         if (!is_tail_call) {
                 emit(rv_addi(RV_REG_A0, lo(r0), 0), ctx);

@@ -123,15 +152,15 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
         }
 
         /* Restore callee-saved registers. */
-        emit(rv_lw(RV_REG_RA, store_offset - 0, RV_REG_SP), ctx);
-        emit(rv_lw(RV_REG_FP, store_offset - 4, RV_REG_SP), ctx);
-        emit(rv_lw(RV_REG_S1, store_offset - 8, RV_REG_SP), ctx);
-        emit(rv_lw(RV_REG_S2, store_offset - 12, RV_REG_SP), ctx);
-        emit(rv_lw(RV_REG_S3, store_offset - 16, RV_REG_SP), ctx);
-        emit(rv_lw(RV_REG_S4, store_offset - 20, RV_REG_SP), ctx);
-        emit(rv_lw(RV_REG_S5, store_offset - 24, RV_REG_SP), ctx);
-        emit(rv_lw(RV_REG_S6, store_offset - 28, RV_REG_SP), ctx);
-        emit(rv_lw(RV_REG_S7, store_offset - 32, RV_REG_SP), ctx);
+        emit(rv_lw(RV_REG_RA, stack_adjust - 4, RV_REG_SP), ctx);
+        emit(rv_lw(RV_REG_FP, stack_adjust - 8, RV_REG_SP), ctx);
+        emit(rv_lw(RV_REG_S1, stack_adjust - 12, RV_REG_SP), ctx);
+        emit(rv_lw(RV_REG_S2, stack_adjust - 16, RV_REG_SP), ctx);
+        emit(rv_lw(RV_REG_S3, stack_adjust - 20, RV_REG_SP), ctx);
+        emit(rv_lw(RV_REG_S4, stack_adjust - 24, RV_REG_SP), ctx);
+        emit(rv_lw(RV_REG_S5, stack_adjust - 28, RV_REG_SP), ctx);
+        emit(rv_lw(RV_REG_S6, stack_adjust - 32, RV_REG_SP), ctx);
+        emit(rv_lw(RV_REG_S7, stack_adjust - 36, RV_REG_SP), ctx);
 
         emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
 

@@ -770,12 +799,13 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
         emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx);
 
         /*
-         * if ((temp_tcc = tcc - 1) < 0)
+         * temp_tcc = tcc - 1;
+         * if (tcc < 0)
          *         goto out;
          */
         emit(rv_addi(RV_REG_T1, RV_REG_TCC, -1), ctx);
         off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2;
-        emit_bcc(BPF_JSLT, RV_REG_T1, RV_REG_ZERO, off, ctx);
+        emit_bcc(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);
 
         /*
          * prog = array->ptrs[index];

@@ -1259,17 +1289,20 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
 
 void bpf_jit_build_prologue(struct rv_jit_context *ctx)
 {
-        /* Make space to save 9 registers: ra, fp, s1--s7. */
-        int stack_adjust = 9 * sizeof(u32), store_offset, bpf_stack_adjust;
         const s8 *fp = bpf2rv32[BPF_REG_FP];
         const s8 *r1 = bpf2rv32[BPF_REG_1];
+        int stack_adjust = 0;
+        int bpf_stack_adjust =
+                round_up(ctx->prog->aux->stack_depth, STACK_ALIGN);
 
-        bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+        /* Make space for callee-saved registers. */
+        stack_adjust += NR_SAVED_REGISTERS * sizeof(u32);
+        /* Make space for BPF registers on stack. */
+        stack_adjust += BPF_JIT_SCRATCH_REGS * sizeof(u32);
+        /* Make space for BPF stack. */
+        stack_adjust += bpf_stack_adjust;
 
-        store_offset = stack_adjust - 4;
-
-        stack_adjust += 4 * BPF_JIT_SCRATCH_REGS;
+        /* Round up for stack alignment. */
         stack_adjust = round_up(stack_adjust, STACK_ALIGN);
 
         /*
          * The first instruction sets the tail-call-counter (TCC) register.

@@ -1280,24 +1313,24 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)
         emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);
 
         /* Save callee-save registers. */
-        emit(rv_sw(RV_REG_SP, store_offset - 0, RV_REG_RA), ctx);
-        emit(rv_sw(RV_REG_SP, store_offset - 4, RV_REG_FP), ctx);
-        emit(rv_sw(RV_REG_SP, store_offset - 8, RV_REG_S1), ctx);
-        emit(rv_sw(RV_REG_SP, store_offset - 12, RV_REG_S2), ctx);
-        emit(rv_sw(RV_REG_SP, store_offset - 16, RV_REG_S3), ctx);
-        emit(rv_sw(RV_REG_SP, store_offset - 20, RV_REG_S4), ctx);
-        emit(rv_sw(RV_REG_SP, store_offset - 24, RV_REG_S5), ctx);
-        emit(rv_sw(RV_REG_SP, store_offset - 28, RV_REG_S6), ctx);
-        emit(rv_sw(RV_REG_SP, store_offset - 32, RV_REG_S7), ctx);
+        emit(rv_sw(RV_REG_SP, stack_adjust - 4, RV_REG_RA), ctx);
+        emit(rv_sw(RV_REG_SP, stack_adjust - 8, RV_REG_FP), ctx);
+        emit(rv_sw(RV_REG_SP, stack_adjust - 12, RV_REG_S1), ctx);
+        emit(rv_sw(RV_REG_SP, stack_adjust - 16, RV_REG_S2), ctx);
+        emit(rv_sw(RV_REG_SP, stack_adjust - 20, RV_REG_S3), ctx);
+        emit(rv_sw(RV_REG_SP, stack_adjust - 24, RV_REG_S4), ctx);
+        emit(rv_sw(RV_REG_SP, stack_adjust - 28, RV_REG_S5), ctx);
+        emit(rv_sw(RV_REG_SP, stack_adjust - 32, RV_REG_S6), ctx);
+        emit(rv_sw(RV_REG_SP, stack_adjust - 36, RV_REG_S7), ctx);
 
         /* Set fp: used as the base address for stacked BPF registers. */
         emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);
 
-        /* Set up BPF stack pointer. */
+        /* Set up BPF frame pointer. */
         emit(rv_addi(lo(fp), RV_REG_SP, bpf_stack_adjust), ctx);
         emit(rv_addi(hi(fp), RV_REG_ZERO, 0), ctx);
 
-        /* Set up context pointer. */
+        /* Set up BPF context pointer. */
         emit(rv_addi(lo(r1), RV_REG_A0, 0), ctx);
         emit(rv_addi(hi(r1), RV_REG_ZERO, 0), ctx);
 
@@ -51,10 +51,9 @@ static struct platform_device *appldata_pdev;
  */
 static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
 static int appldata_timer_handler(struct ctl_table *ctl, int write,
-                                  void __user *buffer, size_t *lenp, loff_t *ppos);
+                                  void *buffer, size_t *lenp, loff_t *ppos);
 static int appldata_interval_handler(struct ctl_table *ctl, int write,
-                                     void __user *buffer,
-                                     size_t *lenp, loff_t *ppos);
+                                     void *buffer, size_t *lenp, loff_t *ppos);
 
 static struct ctl_table_header *appldata_sysctl_header;
 static struct ctl_table appldata_table[] = {

@@ -217,7 +216,7 @@ static void __appldata_vtimer_setup(int cmd)
  */
 static int
 appldata_timer_handler(struct ctl_table *ctl, int write,
-                       void __user *buffer, size_t *lenp, loff_t *ppos)
+                       void *buffer, size_t *lenp, loff_t *ppos)
 {
         int timer_active = appldata_timer_active;
         int rc;

@@ -250,7 +249,7 @@ appldata_timer_handler(struct ctl_table *ctl, int write,
  */
 static int
 appldata_interval_handler(struct ctl_table *ctl, int write,
-                          void __user *buffer, size_t *lenp, loff_t *ppos)
+                          void *buffer, size_t *lenp, loff_t *ppos)
 {
         int interval = appldata_interval;
         int rc;

@@ -280,7 +279,7 @@ appldata_interval_handler(struct ctl_table *ctl, int write,
  */
 static int
 appldata_generic_handler(struct ctl_table *ctl, int write,
-                         void __user *buffer, size_t *lenp, loff_t *ppos)
+                         void *buffer, size_t *lenp, loff_t *ppos)
 {
         struct appldata_ops *ops = NULL, *tmp_ops;
         struct list_head *lh;
@@ -867,7 +867,7 @@ static int debug_active = 1;
  * if debug_active is already off
  */
 static int s390dbf_procactive(struct ctl_table *table, int write,
-                              void __user *buffer, size_t *lenp, loff_t *ppos)
+                              void *buffer, size_t *lenp, loff_t *ppos)
 {
         if (!write || debug_stoppable || !debug_active)
                 return proc_dointvec(table, write, buffer, lenp, ppos);
@@ -594,7 +594,7 @@ static int __init topology_setup(char *str)
 early_param("topology", topology_setup);
 
 static int topology_ctl_handler(struct ctl_table *ctl, int write,
-                                void __user *buffer, size_t *lenp, loff_t *ppos)
+                                void *buffer, size_t *lenp, loff_t *ppos)
 {
         int enabled = topology_is_enabled();
         int new_mode;
@@ -245,7 +245,7 @@ static int cmm_skip_blanks(char *cp, char **endp)
 }
 
 static int cmm_pages_handler(struct ctl_table *ctl, int write,
-                             void __user *buffer, size_t *lenp, loff_t *ppos)
+                             void *buffer, size_t *lenp, loff_t *ppos)
 {
         long nr = cmm_get_pages();
         struct ctl_table ctl_entry = {

@@ -264,7 +264,7 @@ static int cmm_pages_handler(struct ctl_table *ctl, int write,
 }
 
 static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
-                                   void __user *buffer, size_t *lenp,
+                                   void *buffer, size_t *lenp,
                                    loff_t *ppos)
 {
         long nr = cmm_get_timed_pages();

@@ -284,7 +284,7 @@ static int cmm_timed_pages_handler(struct ctl_table *ctl, int write,
 }
 
 static int cmm_timeout_handler(struct ctl_table *ctl, int write,
-                               void __user *buffer, size_t *lenp, loff_t *ppos)
+                               void *buffer, size_t *lenp, loff_t *ppos)
 {
         char buf[64], *p;
         long nr, seconds;

@@ -297,8 +297,7 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
 
         if (write) {
                 len = min(*lenp, sizeof(buf));
-                if (copy_from_user(buf, buffer, len))
-                        return -EFAULT;
+                memcpy(buf, buffer, len);
                 buf[len - 1] = '\0';
                 cmm_skip_blanks(buf, &p);
                 nr = simple_strtoul(p, &p, 0);

@@ -311,8 +310,7 @@ static int cmm_timeout_handler(struct ctl_table *ctl, int write,
                          cmm_timeout_pages, cmm_timeout_seconds);
                 if (len > *lenp)
                         len = *lenp;
-                if (copy_to_user(buffer, buf, len))
-                        return -EFAULT;
+                memcpy(buffer, buf, len);
                 *lenp = len;
                 *ppos += len;
         }
@@ -39,8 +39,8 @@ static bool __read_mostly sched_itmt_capable;
 unsigned int __read_mostly sysctl_sched_itmt_enabled;
 
 static int sched_itmt_update_handler(struct ctl_table *table, int write,
-                                     void __user *buffer, size_t *lenp,
-                                     loff_t *ppos)
+                                     void *buffer, size_t *lenp, loff_t *ppos)
 {
         unsigned int old_sysctl;
         int ret;
@@ -3631,7 +3631,7 @@ static void cdrom_update_settings(void)
 }
 
 static int cdrom_sysctl_handler(struct ctl_table *ctl, int write,
-                                void __user *buffer, size_t *lenp, loff_t *ppos)
+                                void *buffer, size_t *lenp, loff_t *ppos)
 {
         int ret;
 
@@ -2057,7 +2057,7 @@ static char sysctl_bootid[16];
  * sysctl system call, as 16 bytes of binary data.
  */
 static int proc_do_uuid(struct ctl_table *table, int write,
-                        void __user *buffer, size_t *lenp, loff_t *ppos)
+                        void *buffer, size_t *lenp, loff_t *ppos)
 {
         struct ctl_table fake_table;
         unsigned char buf[64], tmp_uuid[16], *uuid;
@@ -183,8 +183,7 @@ static void mac_hid_stop_emulation(void)
 }
 
 static int mac_hid_toggle_emumouse(struct ctl_table *table, int write,
-                                   void __user *buffer, size_t *lenp,
-                                   loff_t *ppos)
+                                   void *buffer, size_t *lenp, loff_t *ppos)
 {
         int *valp = table->data;
         int old_val = *valp;
@@ -103,6 +103,8 @@ lirc_mode2_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
                 return &bpf_map_peek_elem_proto;
         case BPF_FUNC_ktime_get_ns:
                 return &bpf_ktime_get_ns_proto;
+        case BPF_FUNC_ktime_get_boot_ns:
+                return &bpf_ktime_get_boot_ns_proto;
         case BPF_FUNC_tail_call:
                 return &bpf_tail_call_proto;
         case BPF_FUNC_get_prandom_u32:
@@ -34,7 +34,7 @@
 #define PARPORT_MAX_SPINTIME_VALUE 1000
 
 static int do_active_device(struct ctl_table *table, int write,
-                            void __user *result, size_t *lenp, loff_t *ppos)
+                            void *result, size_t *lenp, loff_t *ppos)
 {
         struct parport *port = (struct parport *)table->extra1;
         char buffer[256];

@@ -65,13 +65,13 @@ static int do_active_device(struct ctl_table *table, int write,
         *lenp = len;
 
         *ppos += len;
-
-        return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+        memcpy(result, buffer, len);
+        return 0;
 }
 
 #ifdef CONFIG_PARPORT_1284
 static int do_autoprobe(struct ctl_table *table, int write,
-                        void __user *result, size_t *lenp, loff_t *ppos)
+                        void *result, size_t *lenp, loff_t *ppos)
 {
         struct parport_device_info *info = table->extra2;
         const char *str;

@@ -108,13 +108,13 @@ static int do_autoprobe(struct ctl_table *table, int write,
 
         *ppos += len;
-
-        return copy_to_user (result, buffer, len) ? -EFAULT : 0;
+        memcpy(result, buffer, len);
+        return 0;
 }
 #endif /* IEEE1284.3 support. */
 
 static int do_hardware_base_addr(struct ctl_table *table, int write,
-                                 void __user *result,
-                                 size_t *lenp, loff_t *ppos)
+                                 void *result, size_t *lenp, loff_t *ppos)
 {
         struct parport *port = (struct parport *)table->extra1;
         char buffer[20];

@@ -136,13 +136,12 @@ static int do_hardware_base_addr(struct ctl_table *table, int write,
         *lenp = len;
 
         *ppos += len;
-
-        return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+        memcpy(result, buffer, len);
+        return 0;
 }
 
 static int do_hardware_irq(struct ctl_table *table, int write,
-                           void __user *result,
-                           size_t *lenp, loff_t *ppos)
+                           void *result, size_t *lenp, loff_t *ppos)
 {
         struct parport *port = (struct parport *)table->extra1;
         char buffer[20];

@@ -164,13 +163,12 @@ static int do_hardware_irq(struct ctl_table *table, int write,
         *lenp = len;
 
         *ppos += len;
-
-        return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+        memcpy(result, buffer, len);
+        return 0;
 }
 
 static int do_hardware_dma(struct ctl_table *table, int write,
-                           void __user *result,
-                           size_t *lenp, loff_t *ppos)
+                           void *result, size_t *lenp, loff_t *ppos)
 {
         struct parport *port = (struct parport *)table->extra1;
         char buffer[20];

@@ -192,13 +190,12 @@ static int do_hardware_dma(struct ctl_table *table, int write,
         *lenp = len;
 
         *ppos += len;
-
-        return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+        memcpy(result, buffer, len);
+        return 0;
 }
 
 static int do_hardware_modes(struct ctl_table *table, int write,
-                             void __user *result,
-                             size_t *lenp, loff_t *ppos)
+                             void *result, size_t *lenp, loff_t *ppos)
 {
         struct parport *port = (struct parport *)table->extra1;
         char buffer[40];

@@ -231,8 +228,8 @@ static int do_hardware_modes(struct ctl_table *table, int write,
         *lenp = len;
 
         *ppos += len;
-
-        return copy_to_user(result, buffer, len) ? -EFAULT : 0;
+        memcpy(result, buffer, len);
+        return 0;
 }
 
 #define PARPORT_PORT_DIR(CHILD) { .procname = NULL, .mode = 0555, .child = CHILD }
@@ -165,7 +165,7 @@ static long get_nr_dentry_negative(void)
         return sum < 0 ? 0 : sum;
 }
 
-int proc_nr_dentry(struct ctl_table *table, int write, void __user *buffer,
+int proc_nr_dentry(struct ctl_table *table, int write, void *buffer,
                    size_t *lenp, loff_t *ppos)
 {
         dentry_stat.nr_dentry = get_nr_dentry();
@@ -47,7 +47,7 @@ static void drop_pagecache_sb(struct super_block *sb, void *unused)
 }
 
 int drop_caches_sysctl_handler(struct ctl_table *table, int write,
-                               void __user *buffer, size_t *length, loff_t *ppos)
+                               void *buffer, size_t *length, loff_t *ppos)
 {
         int ret;
 
@@ -80,14 +80,14 @@ EXPORT_SYMBOL_GPL(get_max_files);
  */
 #if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
 int proc_nr_files(struct ctl_table *table, int write,
-                  void __user *buffer, size_t *lenp, loff_t *ppos)
+                  void *buffer, size_t *lenp, loff_t *ppos)
 {
         files_stat.nr_files = get_nr_files();
         return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
 }
 #else
 int proc_nr_files(struct ctl_table *table, int write,
-                  void __user *buffer, size_t *lenp, loff_t *ppos)
+                  void *buffer, size_t *lenp, loff_t *ppos)
 {
         return -ENOSYS;
 }
@@ -51,8 +51,7 @@ static unsigned fscache_op_max_active = 2;
 static struct ctl_table_header *fscache_sysctl_header;
 
 static int fscache_max_active_sysctl(struct ctl_table *table, int write,
-                                     void __user *buffer,
-                                     size_t *lenp, loff_t *ppos)
+                                     void *buffer, size_t *lenp, loff_t *ppos)
 {
         struct workqueue_struct **wqp = table->extra1;
         unsigned int *datap = table->data;
@@ -108,7 +108,7 @@ long get_nr_dirty_inodes(void)
  */
 #ifdef CONFIG_SYSCTL
 int proc_nr_inodes(struct ctl_table *table, int write,
-                   void __user *buffer, size_t *lenp, loff_t *ppos)
+                   void *buffer, size_t *lenp, loff_t *ppos)
 {
         inodes_stat.nr_inodes = get_nr_inodes();
         inodes_stat.nr_unused = get_nr_inodes_unused();
@@ -539,13 +539,13 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
         return err;
 }
 
-static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
+static ssize_t proc_sys_call_handler(struct file *filp, void __user *ubuf,
                                      size_t count, loff_t *ppos, int write)
 {
         struct inode *inode = file_inode(filp);
         struct ctl_table_header *head = grab_header(inode);
         struct ctl_table *table = PROC_I(inode)->sysctl_entry;
-        void *new_buf = NULL;
+        void *kbuf;
         ssize_t error;
 
         if (IS_ERR(head))

@@ -564,27 +564,38 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
         if (!table->proc_handler)
                 goto out;
 
-        error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, &count,
-                                           ppos, &new_buf);
-        if (error)
-                goto out;
-
-        /* careful: calling conventions are nasty here */
-        if (new_buf) {
-                mm_segment_t old_fs;
-
-                old_fs = get_fs();
-                set_fs(KERNEL_DS);
-                error = table->proc_handler(table, write, (void __user *)new_buf,
-                                            &count, ppos);
-                set_fs(old_fs);
-                kfree(new_buf);
+        if (write) {
+                kbuf = memdup_user_nul(ubuf, count);
+                if (IS_ERR(kbuf)) {
+                        error = PTR_ERR(kbuf);
+                        goto out;
+                }
         } else {
-                error = table->proc_handler(table, write, buf, &count, ppos);
+                error = -ENOMEM;
+                kbuf = kzalloc(count, GFP_KERNEL);
+                if (!kbuf)
+                        goto out;
         }
 
-        if (!error)
-                error = count;
+        error = BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, &kbuf, &count,
+                                           ppos);
+        if (error)
+                goto out_free_buf;
+
+        /* careful: calling conventions are nasty here */
+        error = table->proc_handler(table, write, kbuf, &count, ppos);
+        if (error)
+                goto out_free_buf;
+
+        if (!write) {
+                error = -EFAULT;
+                if (copy_to_user(ubuf, kbuf, count))
+                        goto out_free_buf;
+        }
 
+        error = count;
+out_free_buf:
+        kfree(kbuf);
 out:
         sysctl_head_finish(head);
 
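The hunk above is the pivot of the whole sysctl conversion in this merge: proc_sys_call_handler() now copies user data into a kernel buffer itself (memdup_user_nul() on write, kzalloc() plus a final copy_to_user() on read), so every ->proc_handler receives a plain kernel pointer and the set_fs(KERNEL_DS) trick disappears. A minimal sketch of a handler written against the new convention — the handler and the sysctl it serves are hypothetical, and *ppos handling is simplified:

#include <linux/sysctl.h>
#include <linux/kernel.h>

static int example_value;

/* "buffer" is now a kernel pointer: read and write it directly,
 * exactly as the parport and cmm hunks above were converted. */
static int example_handler(struct ctl_table *table, int write,
                           void *buffer, size_t *lenp, loff_t *ppos)
{
        if (write) {
                /* memdup_user_nul() already NUL-terminated the input. */
                int ret = kstrtoint(buffer, 10, &example_value);

                if (ret)
                        return ret;
                *ppos += *lenp;
        } else {
                /* Whatever lands in buffer is copied back to userspace
                 * by proc_sys_call_handler() afterwards. */
                *lenp = scnprintf(buffer, *lenp, "%d\n", example_value);
                *ppos += *lenp;
        }
        return 0;
}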
@@ -2841,7 +2841,7 @@ const struct quotactl_ops dquot_quotactl_sysfile_ops = {
 EXPORT_SYMBOL(dquot_quotactl_sysfile_ops);
 
 static int do_proc_dqstats(struct ctl_table *table, int write,
-                           void __user *buffer, size_t *lenp, loff_t *ppos)
+                           void *buffer, size_t *lenp, loff_t *ppos)
 {
         unsigned int type = (unsigned long *)table->data - dqstats.stat;
         s64 value = percpu_counter_sum(&dqstats.counter[type]);
@@ -13,7 +13,7 @@ STATIC int
 xfs_stats_clear_proc_handler(
         struct ctl_table        *ctl,
         int                     write,
-        void                    __user *buffer,
+        void                    *buffer,
         size_t                  *lenp,
         loff_t                  *ppos)
 {

@@ -33,7 +33,7 @@ STATIC int
 xfs_panic_mask_proc_handler(
         struct ctl_table        *ctl,
         int                     write,
-        void                    __user *buffer,
+        void                    *buffer,
         size_t                  *lenp,
         loff_t                  *ppos)
 {
@@ -57,8 +57,6 @@ struct bpf_cgroup_link {
         enum bpf_attach_type type;
 };
 
-extern const struct bpf_link_ops bpf_cgroup_link_lops;
-
 struct bpf_prog_list {
         struct list_head node;
         struct bpf_prog *prog;

@@ -100,8 +98,6 @@ int __cgroup_bpf_attach(struct cgroup *cgrp,
 int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                         struct bpf_cgroup_link *link,
                         enum bpf_attach_type type);
-int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link,
-                         struct bpf_prog *new_prog);
 int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                        union bpf_attr __user *uattr);
 

@@ -112,8 +108,6 @@ int cgroup_bpf_attach(struct cgroup *cgrp,
                       u32 flags);
 int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
                       enum bpf_attach_type type);
-int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *old_prog,
-                       struct bpf_prog *new_prog);
 int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
                      union bpf_attr __user *uattr);
 

@@ -138,8 +132,7 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
 
 int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
                                    struct ctl_table *table, int write,
-                                   void __user *buf, size_t *pcount,
-                                   loff_t *ppos, void **new_buf,
+                                   void **buf, size_t *pcount, loff_t *ppos,
                                    enum bpf_attach_type type);
 
 int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,

@@ -302,12 +295,12 @@ int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
 })
 
 
-#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos, nbuf)  \
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head, table, write, buf, count, pos)  \
 ({  \
         int __ret = 0;  \
         if (cgroup_bpf_enabled)  \
                 __ret = __cgroup_bpf_run_filter_sysctl(head, table, write,  \
-                                                       buf, count, pos, nbuf,  \
+                                                       buf, count, pos,  \
                                                        BPF_CGROUP_SYSCTL);  \
         __ret;  \
 })

@@ -354,7 +347,6 @@ int cgroup_bpf_prog_query(const union bpf_attr *attr,
 #else
 
 struct bpf_prog;
-struct bpf_link;
 struct cgroup_bpf {};
 static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
 static inline void cgroup_bpf_offline(struct cgroup *cgrp) {}

@@ -378,13 +370,6 @@ static inline int cgroup_bpf_link_attach(const union bpf_attr *attr,
         return -EINVAL;
 }
 
-static inline int cgroup_bpf_replace(struct bpf_link *link,
-                                     struct bpf_prog *old_prog,
-                                     struct bpf_prog *new_prog)
-{
-        return -EINVAL;
-}
-
 static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
                                         union bpf_attr __user *uattr)
 {

@@ -429,7 +414,7 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
 #define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos,nbuf) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
 #define BPF_CGROUP_GETSOCKOPT_MAX_OPTLEN(optlen) ({ 0; })
 #define BPF_CGROUP_RUN_PROG_GETSOCKOPT(sock, level, optname, optval, \
                                        optlen, max_optlen, retval) ({ retval; })
@@ -987,6 +987,7 @@ _out: \
 
 #ifdef CONFIG_BPF_SYSCALL
 DECLARE_PER_CPU(int, bpf_prog_active);
+extern struct mutex bpf_stats_enabled_mutex;
 
 /*
  * Block execution of BPF programs attached to instrumentation (perf,

@@ -1026,9 +1027,11 @@ extern const struct file_operations bpf_prog_fops;
         extern const struct bpf_verifier_ops _name ## _verifier_ops;
 #define BPF_MAP_TYPE(_id, _ops) \
         extern const struct bpf_map_ops _ops;
+#define BPF_LINK_TYPE(_id, _name)
 #include <linux/bpf_types.h>
 #undef BPF_PROG_TYPE
 #undef BPF_MAP_TYPE
+#undef BPF_LINK_TYPE
 
 extern const struct bpf_prog_ops bpf_offload_prog_ops;
 extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops;

@@ -1085,21 +1088,35 @@ int bpf_prog_new_fd(struct bpf_prog *prog);
 
 struct bpf_link {
         atomic64_t refcnt;
+        u32 id;
+        enum bpf_link_type type;
         const struct bpf_link_ops *ops;
         struct bpf_prog *prog;
         struct work_struct work;
 };
 
+struct bpf_link_primer {
+        struct bpf_link *link;
+        struct file *file;
+        int fd;
+        u32 id;
+};
+
 struct bpf_link_ops {
         void (*release)(struct bpf_link *link);
         void (*dealloc)(struct bpf_link *link);
-
         int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog,
                            struct bpf_prog *old_prog);
+        void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq);
+        int (*fill_link_info)(const struct bpf_link *link,
+                              struct bpf_link_info *info);
 };
 
-void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops,
-                   struct bpf_prog *prog);
-void bpf_link_cleanup(struct bpf_link *link, struct file *link_file,
-                      int link_fd);
+void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
+                   const struct bpf_link_ops *ops, struct bpf_prog *prog);
+int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
+int bpf_link_settle(struct bpf_link_primer *primer);
+void bpf_link_cleanup(struct bpf_link_primer *primer);
 void bpf_link_inc(struct bpf_link *link);
 void bpf_link_put(struct bpf_link *link);
 int bpf_link_new_fd(struct bpf_link *link);
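The declarations above replace the old init/cleanup pair with a prime/settle protocol: bpf_link_prime() reserves an fd, anon file and link ID without making the link visible, bpf_link_settle() publishes it, and bpf_link_cleanup() rolls back a primed-but-unsettled link. A sketch of the attach path this implies — the link type, its ops and the attach step are hypothetical placeholders, not code from this merge:

#include <linux/bpf.h>
#include <linux/slab.h>

/* Hypothetical link type and ops, for illustration only. */
struct example_link {
        struct bpf_link link;
};

static const struct bpf_link_ops example_link_lops; /* .release, .dealloc, ... */

static int example_link_attach(struct bpf_prog *prog)
{
        struct bpf_link_primer link_primer;
        struct example_link *ex;
        int err;

        ex = kzalloc(sizeof(*ex), GFP_USER);
        if (!ex)
                return -ENOMEM;
        bpf_link_init(&ex->link, BPF_LINK_TYPE_UNSPEC,
                      &example_link_lops, prog);

        /* Reserve fd, file and link ID; nothing is visible to userspace yet. */
        err = bpf_link_prime(&ex->link, &link_primer);
        if (err) {
                kfree(ex);
                return err;
        }

        /* ... perform the actual attachment here; on failure call
         * bpf_link_cleanup(&link_primer), which also puts the link ... */

        /* Publish: installs the fd and makes the link ID queryable. */
        return bpf_link_settle(&link_primer);
}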
@@ -1215,6 +1232,7 @@ int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
 
 struct bpf_prog *bpf_prog_by_id(u32 id);
 
+const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id);
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {

@@ -1365,6 +1383,12 @@ static inline struct bpf_prog *bpf_prog_by_id(u32 id)
 {
         return ERR_PTR(-ENOTSUPP);
 }
+
+static inline const struct bpf_func_proto *
+bpf_base_func_proto(enum bpf_func_id func_id)
+{
+        return NULL;
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,

@@ -1502,6 +1526,7 @@ extern const struct bpf_func_proto bpf_get_smp_processor_id_proto;
 extern const struct bpf_func_proto bpf_get_numa_node_id_proto;
 extern const struct bpf_func_proto bpf_tail_call_proto;
 extern const struct bpf_func_proto bpf_ktime_get_ns_proto;
+extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto;
 extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto;
 extern const struct bpf_func_proto bpf_get_current_uid_gid_proto;
 extern const struct bpf_func_proto bpf_get_current_comm_proto;

@@ -1523,6 +1548,7 @@ extern const struct bpf_func_proto bpf_strtoul_proto;
 extern const struct bpf_func_proto bpf_tcp_sock_proto;
 extern const struct bpf_func_proto bpf_jiffies64_proto;
 extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto;
+extern const struct bpf_func_proto bpf_event_output_data_proto;
 
 const struct bpf_func_proto *bpf_tracing_func_proto(
         enum bpf_func_id func_id, const struct bpf_prog *prog);

@@ -1530,6 +1556,7 @@ const struct bpf_func_proto *bpf_tracing_func_proto(
 /* Shared helpers among cBPF and eBPF. */
 void bpf_user_rnd_init_once(void);
 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 #if defined(CONFIG_NET)
 bool bpf_sock_common_is_valid_access(int off, int size,
@@ -118,3 +118,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
 #if defined(CONFIG_BPF_JIT)
 BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
 #endif
+
+BPF_LINK_TYPE(BPF_LINK_TYPE_RAW_TRACEPOINT, raw_tracepoint)
+BPF_LINK_TYPE(BPF_LINK_TYPE_TRACING, tracing)
+#ifdef CONFIG_CGROUP_BPF
+BPF_LINK_TYPE(BPF_LINK_TYPE_CGROUP, cgroup)
+#endif
@@ -86,7 +86,7 @@ static inline unsigned long compact_gap(unsigned int order)
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
-                        void __user *buffer, size_t *length, loff_t *ppos);
+                        void *buffer, size_t *length, loff_t *ppos);
 extern int sysctl_extfrag_threshold;
 extern int sysctl_compact_unevictable_allowed;
 
@@ -22,4 +22,8 @@ extern void do_coredump(const kernel_siginfo_t *siginfo);
 static inline void do_coredump(const kernel_siginfo_t *siginfo) {}
 #endif
 
+extern int core_uses_pid;
+extern char core_pattern[];
+extern unsigned int core_pipe_limit;
+
 #endif /* _LINUX_COREDUMP_H */
@@ -94,4 +94,6 @@ extern void fd_install(unsigned int fd, struct file *file);
 extern void flush_delayed_fput(void);
 extern void __fput_sync(struct file *);
 
+extern unsigned int sysctl_nr_open_min, sysctl_nr_open_max;
+
 #endif /* __LINUX_FILE_H */
@@ -863,8 +863,6 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
 int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
                               bpf_aux_classic_check_t trans, bool save_orig);
 void bpf_prog_destroy(struct bpf_prog *fp);
-const struct bpf_func_proto *
-bpf_base_func_proto(enum bpf_func_id func_id);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_attach_bpf(u32 ufd, struct sock *sk);
@@ -3536,11 +3536,11 @@ ssize_t simple_attr_write(struct file *file, const char __user *buf,
 
 struct ctl_table;
 int proc_nr_files(struct ctl_table *table, int write,
-                  void __user *buffer, size_t *lenp, loff_t *ppos);
+                  void *buffer, size_t *lenp, loff_t *ppos);
 int proc_nr_dentry(struct ctl_table *table, int write,
-                   void __user *buffer, size_t *lenp, loff_t *ppos);
+                   void *buffer, size_t *lenp, loff_t *ppos);
 int proc_nr_inodes(struct ctl_table *table, int write,
-                   void __user *buffer, size_t *lenp, loff_t *ppos);
+                   void *buffer, size_t *lenp, loff_t *ppos);
 int __init get_filesystem_list(char *buf);
 
 #define __FMODE_EXEC ((__force int) FMODE_EXEC)
@@ -1005,8 +1005,7 @@ extern void disable_trace_on_warning(void);
 extern int __disable_trace_on_warning;
 
 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
-                             void __user *buffer, size_t *lenp,
-                             loff_t *ppos);
+                             void *buffer, size_t *lenp, loff_t *ppos);
 
 #else /* CONFIG_TRACING */
 static inline void disable_trace_on_warning(void) { }
@@ -105,14 +105,13 @@ struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
 void hugepage_put_subpool(struct hugepage_subpool *spool);
 
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma);
-int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_overcommit_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-int hugetlb_treat_movable_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *);
-
-#ifdef CONFIG_NUMA
-int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
-                                     void __user *, size_t *, loff_t *);
-#endif
+int hugetlb_sysctl_handler(struct ctl_table *, int, void *, size_t *, loff_t *);
+int hugetlb_overcommit_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
+int hugetlb_treat_movable_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
+int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
 
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
@@ -312,7 +312,7 @@ DEFINE_INSN_CACHE_OPS(optinsn);
 #ifdef CONFIG_SYSCTL
 extern int sysctl_kprobes_optimization;
 extern int proc_kprobes_optimization_handler(struct ctl_table *table,
-                                             int write, void __user *buffer,
+                                             int write, void *buffer,
                                              size_t *length, loff_t *ppos);
 #endif
 extern void wait_for_kprobe_optimizer(void);
@@ -38,8 +38,8 @@ account_scheduler_latency(struct task_struct *task, int usecs, int inter)
 
 void clear_tsk_latency_tracing(struct task_struct *p);
 
-extern int sysctl_latencytop(struct ctl_table *table, int write,
-                        void __user *buffer, size_t *lenp, loff_t *ppos);
+int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
+                size_t *lenp, loff_t *ppos);
 
 #else
 
@@ -201,10 +201,10 @@ extern int sysctl_overcommit_memory;
 extern int sysctl_overcommit_ratio;
 extern unsigned long sysctl_overcommit_kbytes;
 
-extern int overcommit_ratio_handler(struct ctl_table *, int, void __user *,
-                                    size_t *, loff_t *);
-extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
-                                     size_t *, loff_t *);
+int overcommit_ratio_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
+int overcommit_kbytes_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
 
 #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
 

@@ -2957,8 +2957,8 @@ extern bool process_shares_mm(struct task_struct *p, struct mm_struct *mm);
 
 #ifdef CONFIG_SYSCTL
 extern int sysctl_drop_caches;
-int drop_caches_sysctl_handler(struct ctl_table *, int,
-                               void __user *, size_t *, loff_t *);
+int drop_caches_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
 #endif
 
 void drop_slab(void);

@@ -3140,5 +3140,7 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
                                       pgoff_t first_index, pgoff_t nr);
 #endif
 
+extern int sysctl_nr_trim_pages;
+
 #endif /* __KERNEL__ */
 #endif /* _LINUX_MM_H */
@@ -909,24 +909,23 @@ static inline int is_highmem(struct zone *zone)
 
 /* These two functions are used to setup the per zone pages min values */
 struct ctl_table;
-int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
-                                   void __user *, size_t *, loff_t *);
-int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
-                                          void __user *, size_t *, loff_t *);
-int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
-                                          void __user *, size_t *, loff_t *);
-extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
-int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
-                                        void __user *, size_t *, loff_t *);
-int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
-                                            void __user *, size_t *, loff_t *);
-int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
-                                             void __user *, size_t *, loff_t *);
-int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
-                                         void __user *, size_t *, loff_t *);
-
-extern int numa_zonelist_order_handler(struct ctl_table *, int,
-                                       void __user *, size_t *, loff_t *);
+int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
+int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *,
+                size_t *, loff_t *);
+extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
+int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *,
+                size_t *, loff_t *);
+int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
+                void *, size_t *, loff_t *);
+int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
+                void *, size_t *, loff_t *);
+int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
+                void *, size_t *, loff_t *);
+int numa_zonelist_order_handler(struct ctl_table *, int,
+                void *, size_t *, loff_t *);
 extern int percpu_pagelist_fraction;
 extern char numa_zonelist_order[];
 #define NUMA_ZONELIST_ORDER_LEN 16
@@ -202,16 +202,11 @@ static inline void watchdog_update_hrtimer_threshold(u64 period) { }
 #endif
 
 struct ctl_table;
-extern int proc_watchdog(struct ctl_table *, int ,
-                         void __user *, size_t *, loff_t *);
-extern int proc_nmi_watchdog(struct ctl_table *, int ,
-                             void __user *, size_t *, loff_t *);
-extern int proc_soft_watchdog(struct ctl_table *, int ,
-                              void __user *, size_t *, loff_t *);
-extern int proc_watchdog_thresh(struct ctl_table *, int ,
-                                void __user *, size_t *, loff_t *);
-extern int proc_watchdog_cpumask(struct ctl_table *, int,
-                                 void __user *, size_t *, loff_t *);
+int proc_watchdog(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_nmi_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
+int proc_soft_watchdog(struct ctl_table *, int , void *, size_t *, loff_t *);
+int proc_watchdog_thresh(struct ctl_table *, int , void *, size_t *, loff_t *);
+int proc_watchdog_cpumask(struct ctl_table *, int, void *, size_t *, loff_t *);
 
 #ifdef CONFIG_HAVE_ACPI_APEI_NMI
 #include <asm/nmi.h>
@@ -1280,15 +1280,12 @@ extern int sysctl_perf_cpu_time_max_percent;
 
 extern void perf_sample_event_took(u64 sample_len_ns);
 
-extern int perf_proc_update_handler(struct ctl_table *table, int write,
-                void __user *buffer, size_t *lenp,
-                loff_t *ppos);
-extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
-                void __user *buffer, size_t *lenp,
-                loff_t *ppos);
-
+int perf_proc_update_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
+int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
 int perf_event_max_stack_handler(struct ctl_table *table, int write,
-                                 void __user *buffer, size_t *lenp, loff_t *ppos);
+                                 void *buffer, size_t *lenp, loff_t *ppos);
 
 /* Access to perf_event_open(2) syscall. */
 #define PERF_SECURITY_OPEN 0
@@ -108,6 +108,9 @@ extern void transfer_pid(struct task_struct *old, struct task_struct *new,
 struct pid_namespace;
 extern struct pid_namespace init_pid_ns;
 
+extern int pid_max;
+extern int pid_max_min, pid_max_max;
+
 /*
  * look up a PID in the hash table. Must be called with the tasklist_lock
  * or rcu_read_lock() held.
@@ -189,7 +189,7 @@ extern int printk_delay_msec;
 extern int dmesg_restrict;
 
 extern int
-devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void __user *buf,
+devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write, void *buf,
                           size_t *lenp, loff_t *ppos);
 
 extern void wake_up_klogd(void);
@@ -12,9 +12,8 @@ extern unsigned int sysctl_hung_task_panic;
 extern unsigned long sysctl_hung_task_timeout_secs;
 extern unsigned long sysctl_hung_task_check_interval_secs;
 extern int sysctl_hung_task_warnings;
-extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
-                                         void __user *buffer,
-                                         size_t *lenp, loff_t *ppos);
+int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
+                                  void *buffer, size_t *lenp, loff_t *ppos);
 #else
 /* Avoid need for ifdefs elsewhere in the code */
 enum { sysctl_hung_task_timeout_secs = 0 };

@@ -43,8 +42,7 @@ extern __read_mostly unsigned int sysctl_sched_migration_cost;
 extern __read_mostly unsigned int sysctl_sched_nr_migrate;
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
-                              void __user *buffer, size_t *length,
-                              loff_t *ppos);
+                              void *buffer, size_t *length, loff_t *ppos);
 #endif
 
 /*

@@ -72,33 +70,21 @@ extern unsigned int sysctl_sched_autogroup_enabled;
 extern int sysctl_sched_rr_timeslice;
 extern int sched_rr_timeslice;
 
-extern int sched_rr_handler(struct ctl_table *table, int write,
-                            void __user *buffer, size_t *lenp,
-                            loff_t *ppos);
-
-extern int sched_rt_handler(struct ctl_table *table, int write,
-                            void __user *buffer, size_t *lenp,
-                            loff_t *ppos);
-
-#ifdef CONFIG_UCLAMP_TASK
-extern int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
-                                       void __user *buffer, size_t *lenp,
-                                       loff_t *ppos);
-#endif
-
-extern int sysctl_numa_balancing(struct ctl_table *table, int write,
-                                 void __user *buffer, size_t *lenp,
-                                 loff_t *ppos);
-
-extern int sysctl_schedstats(struct ctl_table *table, int write,
-                             void __user *buffer, size_t *lenp,
-                             loff_t *ppos);
+int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
+                size_t *lenp, loff_t *ppos);
+int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
+                size_t *lenp, loff_t *ppos);
+int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
+int sysctl_numa_balancing(struct ctl_table *table, int write, void *buffer,
+                size_t *lenp, loff_t *ppos);
+int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
+                size_t *lenp, loff_t *ppos);
 
 #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
 extern unsigned int sysctl_sched_energy_aware;
-extern int sched_energy_aware_handler(struct ctl_table *table, int write,
-                                      void __user *buffer, size_t *lenp,
-                                      loff_t *ppos);
+int sched_energy_aware_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
 #endif /* _LINUX_SCHED_SYSCTL_H */
@@ -211,7 +211,7 @@ struct request_sock;
 
 #ifdef CONFIG_MMU
 extern int mmap_min_addr_handler(struct ctl_table *table, int write,
-                                 void __user *buffer, size_t *lenp, loff_t *ppos);
+                                 void *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
 /* security_inode_init_security callback function to write xattrs */
@@ -44,35 +44,26 @@ struct ctl_dir;
 
 extern const int sysctl_vals[];
 
-typedef int proc_handler (struct ctl_table *ctl, int write,
-                          void __user *buffer, size_t *lenp, loff_t *ppos);
+typedef int proc_handler(struct ctl_table *ctl, int write, void *buffer,
+                size_t *lenp, loff_t *ppos);
 
-extern int proc_dostring(struct ctl_table *, int,
-                         void __user *, size_t *, loff_t *);
-extern int proc_dointvec(struct ctl_table *, int,
-                         void __user *, size_t *, loff_t *);
-extern int proc_douintvec(struct ctl_table *, int,
-                          void __user *, size_t *, loff_t *);
-extern int proc_dointvec_minmax(struct ctl_table *, int,
-                                void __user *, size_t *, loff_t *);
-extern int proc_douintvec_minmax(struct ctl_table *table, int write,
-                                 void __user *buffer, size_t *lenp,
-                                 loff_t *ppos);
-extern int proc_dointvec_jiffies(struct ctl_table *, int,
-                                 void __user *, size_t *, loff_t *);
-extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int,
-                                        void __user *, size_t *, loff_t *);
-extern int proc_dointvec_ms_jiffies(struct ctl_table *, int,
-                                    void __user *, size_t *, loff_t *);
-extern int proc_doulongvec_minmax(struct ctl_table *, int,
-                                  void __user *, size_t *, loff_t *);
-extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
-                                             void __user *, size_t *, loff_t *);
-extern int proc_do_large_bitmap(struct ctl_table *, int,
-                                void __user *, size_t *, loff_t *);
-extern int proc_do_static_key(struct ctl_table *table, int write,
-                              void __user *buffer, size_t *lenp,
-                              loff_t *ppos);
+int proc_dostring(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dointvec(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_douintvec(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dointvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_douintvec_minmax(struct ctl_table *table, int write, void *buffer,
+                size_t *lenp, loff_t *ppos);
+int proc_dointvec_jiffies(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_dointvec_userhz_jiffies(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
+int proc_dointvec_ms_jiffies(struct ctl_table *, int, void *, size_t *,
+                loff_t *);
+int proc_doulongvec_minmax(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int, void *,
+                size_t *, loff_t *);
+int proc_do_large_bitmap(struct ctl_table *, int, void *, size_t *, loff_t *);
+int proc_do_static_key(struct ctl_table *table, int write, void *buffer,
+                size_t *lenp, loff_t *ppos);
 
 /*
  * Register a set of sysctl names by calling register_sysctl_table

@@ -207,7 +198,15 @@ void unregister_sysctl_table(struct ctl_table_header * table);
 
 extern int sysctl_init(void);
 
+extern int pwrsw_enabled;
+extern int unaligned_enabled;
+extern int unaligned_dump_stack;
+extern int no_unaligned_warning;
+
 extern struct ctl_table sysctl_mount_point[];
+extern struct ctl_table random_table[];
+extern struct ctl_table firmware_config_table[];
+extern struct ctl_table epoll_table[];
 
 #else /* CONFIG_SYSCTL */
 static inline struct ctl_table_header *register_sysctl_table(struct ctl_table * table)

@@ -238,7 +237,7 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
 
 #endif /* CONFIG_SYSCTL */
 
-int sysctl_max_threads(struct ctl_table *table, int write,
-                       void __user *buffer, size_t *lenp, loff_t *ppos);
+int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
+                size_t *lenp, loff_t *ppos);
 
 #endif /* _LINUX_SYSCTL_H */
@@ -201,8 +201,7 @@ struct ctl_table;
 
 extern unsigned int sysctl_timer_migration;
 int timer_migration_handler(struct ctl_table *table, int write,
-                            void __user *buffer, size_t *lenp,
-                            loff_t *ppos);
+                            void *buffer, size_t *lenp, loff_t *ppos);
 #endif
 
 unsigned long __round_jiffies(unsigned long j, int cpu);
@@ -16,8 +16,8 @@ extern int sysctl_stat_interval;
 #define DISABLE_NUMA_STAT 0
 extern int sysctl_vm_numa_stat;
 DECLARE_STATIC_KEY_TRUE(vm_numa_stat_key);
-extern int sysctl_vm_numa_stat_handler(struct ctl_table *table,
-                int write, void __user *buffer, size_t *length, loff_t *ppos);
+int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *length, loff_t *ppos);
 #endif
 
 struct reclaim_stat {

@@ -274,8 +274,8 @@ void cpu_vm_stats_fold(int cpu);
 void refresh_zone_stat_thresholds(void);
 
 struct ctl_table;
-int vmstat_refresh(struct ctl_table *, int write,
-                   void __user *buffer, size_t *lenp, loff_t *ppos);
+int vmstat_refresh(struct ctl_table *, int write, void *buffer, size_t *lenp,
+                loff_t *ppos);
 
 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *);
 
@@ -362,24 +362,18 @@ extern int vm_highmem_is_dirtyable;
 extern int block_dump;
 extern int laptop_mode;
 
-extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
-                void __user *buffer, size_t *lenp,
-                loff_t *ppos);
-extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
-                void __user *buffer, size_t *lenp,
-                loff_t *ppos);
-extern int dirty_ratio_handler(struct ctl_table *table, int write,
-                void __user *buffer, size_t *lenp,
-                loff_t *ppos);
-extern int dirty_bytes_handler(struct ctl_table *table, int write,
-                void __user *buffer, size_t *lenp,
-                loff_t *ppos);
+int dirty_background_ratio_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
+int dirty_background_bytes_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
+int dirty_ratio_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
+int dirty_bytes_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
 int dirtytime_interval_handler(struct ctl_table *table, int write,
-                               void __user *buffer, size_t *lenp, loff_t *ppos);
-
-struct ctl_table;
-int dirty_writeback_centisecs_handler(struct ctl_table *, int,
-                                      void __user *, size_t *, loff_t *);
+                               void *buffer, size_t *lenp, loff_t *ppos);
+int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
+                void *buffer, size_t *lenp, loff_t *ppos);
 
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
@ -113,6 +113,9 @@ enum bpf_cmd {
|
|||
BPF_MAP_DELETE_BATCH,
|
||||
BPF_LINK_CREATE,
|
||||
BPF_LINK_UPDATE,
|
||||
BPF_LINK_GET_FD_BY_ID,
|
||||
BPF_LINK_GET_NEXT_ID,
|
||||
BPF_ENABLE_STATS,
|
||||
};
|
||||
|
||||
enum bpf_map_type {
|
||||
|
@ -220,6 +223,15 @@ enum bpf_attach_type {
|
|||
|
||||
#define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE
|
||||
|
||||
enum bpf_link_type {
|
||||
BPF_LINK_TYPE_UNSPEC = 0,
|
||||
BPF_LINK_TYPE_RAW_TRACEPOINT = 1,
|
||||
BPF_LINK_TYPE_TRACING = 2,
|
||||
BPF_LINK_TYPE_CGROUP = 3,
|
||||
|
||||
MAX_BPF_LINK_TYPE,
|
||||
};
|
||||
|
||||
/* cgroup-bpf attach flags used in BPF_PROG_ATTACH command
|
||||
*
|
||||
* NONE(default): No further bpf programs allowed in the subtree.
|
||||
|
@ -379,6 +391,12 @@ enum {
|
|||
*/
|
||||
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
|
||||
|
||||
/* type for BPF_ENABLE_STATS */
|
||||
enum bpf_stats_type {
|
||||
/* enabled run_time_ns and run_cnt */
|
||||
BPF_STATS_RUN_TIME = 0,
|
||||
};
|
||||
|
||||
enum bpf_stack_build_id_status {
|
||||
/* user space need an empty entry to identify end of a trace */
|
||||
BPF_STACK_BUILD_ID_EMPTY = 0,
|
||||
|
@ -523,6 +541,7 @@ union bpf_attr {
__u32 prog_id;
__u32 map_id;
__u32 btf_id;
__u32 link_id;
};
__u32 next_id;
__u32 open_flags;

@ -589,6 +608,10 @@ union bpf_attr {
__u32 old_prog_fd;
} link_update;

struct { /* struct used by BPF_ENABLE_STATS command */
__u32 type;
} enable_stats;

} __attribute__((aligned(8)));

/* The description below is an attempt at providing documentation to eBPF
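BPF_ENABLE_STATS is deliberately FD-based: the command returns an anon-inode FD, and run_time_ns/run_cnt accounting stays enabled until every such FD is closed, so a crashed tool cannot leave stats switched on. A raw-syscall sketch of the userspace side (requires CAP_SYS_ADMIN; a libbpf wrapper would normally be used instead):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int enable_bpf_run_stats(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.enable_stats.type = BPF_STATS_RUN_TIME;

	/* keep the returned FD open for as long as stats are wanted */
	return syscall(__NR_bpf, BPF_ENABLE_STATS, &attr, sizeof(attr));
}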
@ -652,6 +675,8 @@ union bpf_attr {
* u64 bpf_ktime_get_ns(void)
* Description
* Return the time elapsed since system boot, in nanoseconds.
* Does not include time the system was suspended.
* See: clock_gettime(CLOCK_MONOTONIC)
* Return
* Current *ktime*.
*
@ -1562,7 +1587,7 @@ union bpf_attr {
* Return
* 0
*
* int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* int bpf_setsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **setsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at

@ -1570,6 +1595,11 @@ union bpf_attr {
* must be specified, see **setsockopt(2)** for more information.
* The option value of length *optlen* is pointed by *optval*.
*
* *bpf_socket* should be one of the following:
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
* and **BPF_CGROUP_INET6_CONNECT**.
*
* This helper actually implements a subset of **setsockopt()**.
* It supports the following *level*\ s:
*
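Since the helper's context argument is now documented as void *, a single definition serves both sock_ops programs and the connect-time sock_addr hooks. A sock_ops sketch (the option, hook and sizes are illustrative; SOL_SOCKET and SO_RCVBUF are spelled out because BPF objects usually do not include the libc socket headers):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define SOL_SOCKET	1
#define SO_RCVBUF	8

SEC("sockops")
int tune_rcvbuf(struct bpf_sock_ops *skops)
{
	int bufsize = 1 << 20;	/* illustrative 1 MiB */

	if (skops->op == BPF_SOCK_OPS_TCP_CONNECT_CB)
		bpf_setsockopt(skops, SOL_SOCKET, SO_RCVBUF,
			       &bufsize, sizeof(bufsize));
	return 1;
}

char _license[] SEC("license") = "GPL";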
@ -1764,7 +1794,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
* int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* int bpf_getsockopt(void *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **getsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at

@ -1773,6 +1803,11 @@ union bpf_attr {
* The retrieved value is stored in the structure pointed by
* *optval* and of length *optlen*.
*
* *bpf_socket* should be one of the following:
* * **struct bpf_sock_ops** for **BPF_PROG_TYPE_SOCK_OPS**.
* * **struct bpf_sock_addr** for **BPF_CGROUP_INET4_CONNECT**
* and **BPF_CGROUP_INET6_CONNECT**.
*
* This helper actually implements a subset of **getsockopt()**.
* It supports the following *level*\ s:
*
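The read side mirrors it; TCP_CONGESTION is one of the options the emulation has long supported. A short sketch reusing the includes from the previous one (constants again spelled out):

#define SOL_TCP		6
#define TCP_CONGESTION	13

SEC("sockops")
int log_cc(struct bpf_sock_ops *skops)
{
	char cc[16] = {};

	if (!bpf_getsockopt(skops, SOL_TCP, TCP_CONGESTION, cc, sizeof(cc)))
		bpf_printk("cc: %s", cc);
	return 1;
}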
@ -3025,6 +3060,14 @@ union bpf_attr {
* * **-EOPNOTSUPP** Unsupported operation, for example a
* call from outside of TC ingress.
* * **-ESOCKTNOSUPPORT** Socket type not supported (reuseport).
*
* u64 bpf_ktime_get_boot_ns(void)
* Description
* Return the time elapsed since system boot, in nanoseconds.
* Does include the time the system was suspended.
* See: clock_gettime(CLOCK_BOOTTIME)
* Return
* Current *ktime*.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
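The two clocks differ only in how suspend is accounted, which is exactly what makes the pair useful: sampling both makes suspend time observable from BPF. A tracing sketch (assumes helper definitions generated from headers that include this change; the tracepoint is illustrative):

SEC("tracepoint/syscalls/sys_enter_nanosleep")
int stamp_clocks(void *ctx)
{
	__u64 mono = bpf_ktime_get_ns();	/* stops across suspend */
	__u64 boot = bpf_ktime_get_boot_ns();	/* keeps counting */

	/* boot - mono grows by the total time spent suspended */
	bpf_printk("mono=%llu boot=%llu", mono, boot);
	return 0;
}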
@ -3151,7 +3194,8 @@ union bpf_attr {
FN(xdp_output), \
FN(get_netns_cookie), \
FN(get_current_ancestor_cgroup_id), \
FN(sk_assign),
FN(sk_assign), \
FN(ktime_get_boot_ns),

/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@ -3598,6 +3642,25 @@ struct bpf_btf_info {
__u32 id;
} __attribute__((aligned(8)));

struct bpf_link_info {
__u32 type;
__u32 id;
__u32 prog_id;
union {
struct {
__aligned_u64 tp_name; /* in/out: tp_name buffer ptr */
__u32 tp_name_len; /* in/out: tp_name buffer len */
} raw_tracepoint;
struct {
__u32 attach_type;
} tracing;
struct {
__u64 cgroup_id;
__u32 attach_type;
} cgroup;
};
} __attribute__((aligned(8)));

/* User bpf_sock_addr struct to access socket fields and sockaddr struct passed
* by user and intended to be used by socket (e.g. to bind to, depends on
* attach type).
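bpf_link_info is filled through the existing BPF_OBJ_GET_INFO_BY_FD command once the kernel learns to route link FDs to it (see the syscall.c hunks further down). A userspace sketch, error handling trimmed:

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static void print_link_info(int link_fd)
{
	struct bpf_link_info info = {};
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = link_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	if (!syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		printf("type=%u id=%u prog_id=%u\n",
		       info.type, info.id, info.prog_id);
}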
@ -24,7 +24,7 @@ static void *get_ipc(struct ctl_table *table)

#ifdef CONFIG_PROC_SYSCTL
static int proc_ipc_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;

@ -35,7 +35,7 @@ static int proc_ipc_dointvec(struct ctl_table *table, int write,
}

static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;

@ -46,7 +46,7 @@ static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
}

static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ipc_namespace *ns = current->nsproxy->ipc_ns;
int err = proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos);

@ -59,7 +59,7 @@ static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
}

static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
memcpy(&ipc_table, table, sizeof(ipc_table));

@ -70,7 +70,7 @@ static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
}

static int proc_ipc_auto_msgmni(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table ipc_table;
int dummy = 0;
@ -19,7 +19,7 @@ static void *get_mq(struct ctl_table *table)
}

static int proc_mq_dointvec(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table mq_table;
memcpy(&mq_table, table, sizeof(mq_table));

@ -29,7 +29,7 @@ static int proc_mq_dointvec(struct ctl_table *table, int write,
}

static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
void *buffer, size_t *lenp, loff_t *ppos)
{
struct ctl_table mq_table;
memcpy(&mq_table, table, sizeof(mq_table));
@ -3482,6 +3482,7 @@ extern char __weak __stop_BTF[];
extern struct btf *btf_vmlinux;

#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
static union {
struct bpf_ctx_convert {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \

@ -3508,6 +3509,7 @@ static u8 bpf_ctx_convert_map[] = {
0, /* avoid empty array */
};
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

static const struct btf_member *
btf_get_prog_ctx_type(struct bpf_verifier_log *log, struct btf *btf,
@ -557,8 +557,9 @@ static void replace_effective_prog(struct cgroup *cgrp,
*
* Must be called with cgroup_mutex held.
*/
int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link,
struct bpf_prog *new_prog)
static int __cgroup_bpf_replace(struct cgroup *cgrp,
struct bpf_cgroup_link *link,
struct bpf_prog *new_prog)
{
struct list_head *progs = &cgrp->bpf.progs[link->type];
struct bpf_prog *old_prog;

@ -583,6 +584,30 @@ int __cgroup_bpf_replace(struct cgroup *cgrp, struct bpf_cgroup_link *link,
return 0;
}

static int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *new_prog,
struct bpf_prog *old_prog)
{
struct bpf_cgroup_link *cg_link;
int ret;

cg_link = container_of(link, struct bpf_cgroup_link, link);

mutex_lock(&cgroup_mutex);
/* link might have been auto-released by dying cgroup, so fail */
if (!cg_link->cgroup) {
ret = -EINVAL;
goto out_unlock;
}
if (old_prog && link->prog != old_prog) {
ret = -EPERM;
goto out_unlock;
}
ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
mutex_unlock(&cgroup_mutex);
return ret;
}

static struct bpf_prog_list *find_detach_entry(struct list_head *progs,
struct bpf_prog *prog,
struct bpf_cgroup_link *link,
@ -808,17 +833,56 @@ static void bpf_cgroup_link_dealloc(struct bpf_link *link)
kfree(cg_link);
}

const struct bpf_link_ops bpf_cgroup_link_lops = {
static void bpf_cgroup_link_show_fdinfo(const struct bpf_link *link,
struct seq_file *seq)
{
struct bpf_cgroup_link *cg_link =
container_of(link, struct bpf_cgroup_link, link);
u64 cg_id = 0;

mutex_lock(&cgroup_mutex);
if (cg_link->cgroup)
cg_id = cgroup_id(cg_link->cgroup);
mutex_unlock(&cgroup_mutex);

seq_printf(seq,
"cgroup_id:\t%llu\n"
"attach_type:\t%d\n",
cg_id,
cg_link->type);
}

static int bpf_cgroup_link_fill_link_info(const struct bpf_link *link,
struct bpf_link_info *info)
{
struct bpf_cgroup_link *cg_link =
container_of(link, struct bpf_cgroup_link, link);
u64 cg_id = 0;

mutex_lock(&cgroup_mutex);
if (cg_link->cgroup)
cg_id = cgroup_id(cg_link->cgroup);
mutex_unlock(&cgroup_mutex);

info->cgroup.cgroup_id = cg_id;
info->cgroup.attach_type = cg_link->type;
return 0;
}

static const struct bpf_link_ops bpf_cgroup_link_lops = {
.release = bpf_cgroup_link_release,
.dealloc = bpf_cgroup_link_dealloc,
.update_prog = cgroup_bpf_replace,
.show_fdinfo = bpf_cgroup_link_show_fdinfo,
.fill_link_info = bpf_cgroup_link_fill_link_info,
};

int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
struct bpf_link_primer link_primer;
struct bpf_cgroup_link *link;
struct file *link_file;
struct cgroup *cgrp;
int err, link_fd;
int err;

if (attr->link_create.flags)
return -EINVAL;

@ -832,26 +896,25 @@ int cgroup_bpf_link_attach(const union bpf_attr *attr, struct bpf_prog *prog)
err = -ENOMEM;
goto out_put_cgroup;
}
bpf_link_init(&link->link, &bpf_cgroup_link_lops, prog);
bpf_link_init(&link->link, BPF_LINK_TYPE_CGROUP, &bpf_cgroup_link_lops,
prog);
link->cgroup = cgrp;
link->type = attr->link_create.attach_type;

link_file = bpf_link_new_file(&link->link, &link_fd);
if (IS_ERR(link_file)) {
err = bpf_link_prime(&link->link, &link_primer);
if (err) {
kfree(link);
err = PTR_ERR(link_file);
goto out_put_cgroup;
}

err = cgroup_bpf_attach(cgrp, NULL, NULL, link, link->type,
BPF_F_ALLOW_MULTI);
if (err) {
bpf_link_cleanup(&link->link, link_file, link_fd);
bpf_link_cleanup(&link_primer);
goto out_put_cgroup;
}

fd_install(link_fd, link_file);
return link_fd;
return bpf_link_settle(&link_primer);

out_put_cgroup:
cgroup_put(cgrp);
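Userspace reaches cgroup_bpf_link_attach() through BPF_LINK_CREATE. A raw-syscall sketch (the attach type is illustrative; the returned FD owns the attachment, so closing it detaches unless the link was pinned):

static int attach_cgroup_link(int cgroup_fd, int prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.link_create.prog_fd = prog_fd;
	attr.link_create.target_fd = cgroup_fd;
	attr.link_create.attach_type = BPF_CGROUP_INET_INGRESS;

	return syscall(__NR_bpf, BPF_LINK_CREATE, &attr, sizeof(attr));
}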
@ -1054,36 +1117,21 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,

return !allow;
}
EXPORT_SYMBOL(__cgroup_bpf_check_dev_permission);

static const struct bpf_func_proto *
cgroup_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
return &bpf_map_lookup_elem_proto;
case BPF_FUNC_map_update_elem:
return &bpf_map_update_elem_proto;
case BPF_FUNC_map_delete_elem:
return &bpf_map_delete_elem_proto;
case BPF_FUNC_map_push_elem:
return &bpf_map_push_elem_proto;
case BPF_FUNC_map_pop_elem:
return &bpf_map_pop_elem_proto;
case BPF_FUNC_map_peek_elem:
return &bpf_map_peek_elem_proto;
case BPF_FUNC_get_current_uid_gid:
return &bpf_get_current_uid_gid_proto;
case BPF_FUNC_get_local_storage:
return &bpf_get_local_storage_proto;
case BPF_FUNC_get_current_cgroup_id:
return &bpf_get_current_cgroup_id_proto;
case BPF_FUNC_trace_printk:
if (capable(CAP_SYS_ADMIN))
return bpf_get_trace_printk_proto();
/* fall through */
case BPF_FUNC_perf_event_output:
return &bpf_event_output_data_proto;
default:
return NULL;
return bpf_base_func_proto(func_id);
}
}
@ -1137,16 +1185,13 @@ const struct bpf_verifier_ops cg_dev_verifier_ops = {
* @head: sysctl table header
* @table: sysctl table
* @write: sysctl is being read (= 0) or written (= 1)
* @buf: pointer to buffer passed by user space
* @buf: pointer to buffer (in and out)
* @pcount: value-result argument: value is size of buffer pointed to by @buf,
* result is size of @new_buf if program set new value, initial value
* otherwise
* @ppos: value-result argument: value is position at which read from or write
* to sysctl is happening, result is new position if program overrode it,
* initial value otherwise
* @new_buf: pointer to pointer to new buffer that will be allocated if program
* overrides new value provided by user space on sysctl write
* NOTE: it's caller responsibility to free *new_buf if it was set
* @type: type of program to be executed
*
* Program is run when sysctl is being accessed, either read or written, and
@ -1157,8 +1202,7 @@ const struct bpf_verifier_ops cg_dev_verifier_ops = {
*/
int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
struct ctl_table *table, int write,
void __user *buf, size_t *pcount,
loff_t *ppos, void **new_buf,
void **buf, size_t *pcount, loff_t *ppos,
enum bpf_attach_type type)
{
struct bpf_sysctl_kern ctx = {

@ -1173,36 +1217,28 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
.new_updated = 0,
};
struct cgroup *cgrp;
loff_t pos = 0;
int ret;

ctx.cur_val = kmalloc_track_caller(ctx.cur_len, GFP_KERNEL);
if (ctx.cur_val) {
mm_segment_t old_fs;
loff_t pos = 0;

old_fs = get_fs();
set_fs(KERNEL_DS);
if (table->proc_handler(table, 0, (void __user *)ctx.cur_val,
&ctx.cur_len, &pos)) {
/* Let BPF program decide how to proceed. */
ctx.cur_len = 0;
}
set_fs(old_fs);
} else {
if (!ctx.cur_val ||
table->proc_handler(table, 0, ctx.cur_val, &ctx.cur_len, &pos)) {
/* Let BPF program decide how to proceed. */
ctx.cur_len = 0;
}

if (write && buf && *pcount) {
if (write && *buf && *pcount) {
/* BPF program should be able to override new value with a
* buffer bigger than provided by user.
*/
ctx.new_val = kmalloc_track_caller(PAGE_SIZE, GFP_KERNEL);
ctx.new_len = min_t(size_t, PAGE_SIZE, *pcount);
if (!ctx.new_val ||
copy_from_user(ctx.new_val, buf, ctx.new_len))
if (ctx.new_val) {
memcpy(ctx.new_val, *buf, ctx.new_len);
} else {
/* Let BPF program decide how to proceed. */
ctx.new_len = 0;
}
}

rcu_read_lock();

@ -1213,7 +1249,8 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
kfree(ctx.cur_val);

if (ret == 1 && ctx.new_updated) {
*new_buf = ctx.new_val;
kfree(*buf);
*buf = ctx.new_val;
*pcount = ctx.new_len;
} else {
kfree(ctx.new_val);

@ -1221,7 +1258,6 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,

return ret == 1 ? 0 : -EPERM;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_sysctl);

#ifdef CONFIG_NET
static bool __cgroup_bpf_prog_array_is_empty(struct cgroup *cgrp,

@ -1326,7 +1362,6 @@ int __cgroup_bpf_run_filter_setsockopt(struct sock *sk, int *level,
sockopt_free_buf(&ctx);
return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_setsockopt);

int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
int optname, char __user *optval,

@ -1413,7 +1448,6 @@ int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
sockopt_free_buf(&ctx);
return ret;
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_getsockopt);
#endif

static ssize_t sysctl_cpy_dir(const struct ctl_dir *dir, char **bufp,
@ -2136,6 +2136,11 @@ BPF_CALL_0(bpf_user_rnd_u32)
return res;
}

BPF_CALL_0(bpf_get_raw_cpu_id)
{
return raw_smp_processor_id();
}

/* Weak definitions of helper functions in case we don't have bpf syscall. */
const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
const struct bpf_func_proto bpf_map_update_elem_proto __weak;

@ -2151,6 +2156,7 @@ const struct bpf_func_proto bpf_get_prandom_u32_proto __weak;
const struct bpf_func_proto bpf_get_smp_processor_id_proto __weak;
const struct bpf_func_proto bpf_get_numa_node_id_proto __weak;
const struct bpf_func_proto bpf_ktime_get_ns_proto __weak;
const struct bpf_func_proto bpf_ktime_get_boot_ns_proto __weak;

const struct bpf_func_proto bpf_get_current_pid_tgid_proto __weak;
const struct bpf_func_proto bpf_get_current_uid_gid_proto __weak;
@ -151,7 +151,19 @@ BPF_CALL_0(bpf_ktime_get_ns)

const struct bpf_func_proto bpf_ktime_get_ns_proto = {
.func = bpf_ktime_get_ns,
.gpl_only = true,
.gpl_only = false,
.ret_type = RET_INTEGER,
};

BPF_CALL_0(bpf_ktime_get_boot_ns)
{
/* NMI safe access to clock boottime */
return ktime_get_boot_fast_ns();
}

const struct bpf_func_proto bpf_ktime_get_boot_ns_proto = {
.func = bpf_ktime_get_boot_ns,
.gpl_only = false,
.ret_type = RET_INTEGER,
};
@ -562,3 +574,78 @@ const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto = {
.arg3_type = ARG_PTR_TO_UNINIT_MEM,
.arg4_type = ARG_CONST_SIZE,
};

static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
.func = bpf_get_raw_cpu_id,
.gpl_only = false,
.ret_type = RET_INTEGER,
};

BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map,
u64, flags, void *, data, u64, size)
{
if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
return -EINVAL;

return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
}

const struct bpf_func_proto bpf_event_output_data_proto = {
.func = bpf_event_output_data,
.gpl_only = true,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_CONST_MAP_PTR,
.arg3_type = ARG_ANYTHING,
.arg4_type = ARG_PTR_TO_MEM,
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};

const struct bpf_func_proto *
bpf_base_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
return &bpf_map_lookup_elem_proto;
case BPF_FUNC_map_update_elem:
return &bpf_map_update_elem_proto;
case BPF_FUNC_map_delete_elem:
return &bpf_map_delete_elem_proto;
case BPF_FUNC_map_push_elem:
return &bpf_map_push_elem_proto;
case BPF_FUNC_map_pop_elem:
return &bpf_map_pop_elem_proto;
case BPF_FUNC_map_peek_elem:
return &bpf_map_peek_elem_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
case BPF_FUNC_get_smp_processor_id:
return &bpf_get_raw_smp_processor_id_proto;
case BPF_FUNC_get_numa_node_id:
return &bpf_get_numa_node_id_proto;
case BPF_FUNC_tail_call:
return &bpf_tail_call_proto;
case BPF_FUNC_ktime_get_ns:
return &bpf_ktime_get_ns_proto;
case BPF_FUNC_ktime_get_boot_ns:
return &bpf_ktime_get_boot_ns_proto;
default:
break;
}

if (!capable(CAP_SYS_ADMIN))
return NULL;

switch (func_id) {
case BPF_FUNC_spin_lock:
return &bpf_spin_lock_proto;
case BPF_FUNC_spin_unlock:
return &bpf_spin_unlock_proto;
case BPF_FUNC_trace_printk:
return bpf_get_trace_printk_proto();
case BPF_FUNC_jiffies64:
return &bpf_jiffies64_proto;
default:
return NULL;
}
}
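bpf_base_func_proto() centralizes the generically safe helpers and gates the privileged ones on CAP_SYS_ADMIN; per-type callbacks such as cgroup_base_func_proto() above now list only their extras and fall back to it. A hypothetical program type would delegate the same way (sketch; the example name is illustrative):

static const struct bpf_func_proto *
example_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_get_current_pid_tgid:
		/* a type-specific extra */
		return &bpf_get_current_pid_tgid_proto;
	default:
		/* everything common, including the CAP_SYS_ADMIN-gated set */
		return bpf_base_func_proto(func_id);
	}
}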
@ -42,6 +42,8 @@ static DEFINE_IDR(prog_idr);
static DEFINE_SPINLOCK(prog_idr_lock);
static DEFINE_IDR(map_idr);
static DEFINE_SPINLOCK(map_idr_lock);
static DEFINE_IDR(link_idr);
static DEFINE_SPINLOCK(link_idr_lock);

int sysctl_unprivileged_bpf_disabled __read_mostly;

@ -49,9 +51,11 @@ static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops) \
[_id] = &_ops,
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/*

@ -1546,9 +1550,11 @@ static const struct bpf_prog_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
[_id] = & _name ## _prog_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
@ -2181,25 +2187,39 @@ static int bpf_obj_get(const union bpf_attr *attr)
attr->file_flags);
}

void bpf_link_init(struct bpf_link *link, const struct bpf_link_ops *ops,
struct bpf_prog *prog)
void bpf_link_init(struct bpf_link *link, enum bpf_link_type type,
const struct bpf_link_ops *ops, struct bpf_prog *prog)
{
atomic64_set(&link->refcnt, 1);
link->type = type;
link->id = 0;
link->ops = ops;
link->prog = prog;
}

static void bpf_link_free_id(int id)
{
if (!id)
return;

spin_lock_bh(&link_idr_lock);
idr_remove(&link_idr, id);
spin_unlock_bh(&link_idr_lock);
}

/* Clean up bpf_link and corresponding anon_inode file and FD. After
* anon_inode is created, bpf_link can't be just kfree()'d due to deferred
* anon_inode's release() call. This helper manages marking bpf_link as
* defunct, releases anon_inode file and puts reserved FD.
* anon_inode's release() call. This helper marks bpf_link as
* defunct, releases anon_inode file and puts reserved FD. bpf_prog's refcnt
* is not decremented, it's the responsibility of the calling code that failed
* to complete bpf_link initialization.
*/
void bpf_link_cleanup(struct bpf_link *link, struct file *link_file,
int link_fd)
void bpf_link_cleanup(struct bpf_link_primer *primer)
{
link->prog = NULL;
fput(link_file);
put_unused_fd(link_fd);
primer->link->prog = NULL;
bpf_link_free_id(primer->id);
fput(primer->file);
put_unused_fd(primer->fd);
}

void bpf_link_inc(struct bpf_link *link)
@ -2210,6 +2230,7 @@ void bpf_link_inc(struct bpf_link *link)
/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
bpf_link_free_id(link->id);
if (link->prog) {
/* detach BPF program, clean up used resources */
link->ops->release(link);

@ -2251,35 +2272,35 @@ static int bpf_link_release(struct inode *inode, struct file *filp)
}

#ifdef CONFIG_PROC_FS
static const struct bpf_link_ops bpf_raw_tp_lops;
static const struct bpf_link_ops bpf_tracing_link_lops;
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type)
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name) [_id] = #_name,
static const char *bpf_link_type_strs[] = {
[BPF_LINK_TYPE_UNSPEC] = "<invalid>",
#include <linux/bpf_types.h>
};
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE

static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_link *link = filp->private_data;
const struct bpf_prog *prog = link->prog;
char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
const char *link_type;

if (link->ops == &bpf_raw_tp_lops)
link_type = "raw_tracepoint";
else if (link->ops == &bpf_tracing_link_lops)
link_type = "tracing";
#ifdef CONFIG_CGROUP_BPF
else if (link->ops == &bpf_cgroup_link_lops)
link_type = "cgroup";
#endif
else
link_type = "unknown";

bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
seq_printf(m,
"link_type:\t%s\n"
"link_id:\t%u\n"
"prog_tag:\t%s\n"
"prog_id:\t%u\n",
link_type,
bpf_link_type_strs[link->type],
link->id,
prog_tag,
prog->aux->id);
if (link->ops->show_fdinfo)
link->ops->show_fdinfo(link, m);
}
#endif
@ -2292,36 +2313,77 @@ static const struct file_operations bpf_link_fops = {
.write = bpf_dummy_write,
};

int bpf_link_new_fd(struct bpf_link *link)
static int bpf_link_alloc_id(struct bpf_link *link)
{
return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
int id;

idr_preload(GFP_KERNEL);
spin_lock_bh(&link_idr_lock);
id = idr_alloc_cyclic(&link_idr, link, 1, INT_MAX, GFP_ATOMIC);
spin_unlock_bh(&link_idr_lock);
idr_preload_end();

return id;
}

/* Similar to bpf_link_new_fd, create anon_inode for given bpf_link, but
* instead of immediately installing fd in fdtable, just reserve it and
* return. Caller then need to either install it with fd_install(fd, file) or
* release with put_unused_fd(fd).
* This is useful for cases when bpf_link attachment/detachment are
* complicated and expensive operations and should be delayed until all the fd
* reservation and anon_inode creation succeeds.
/* Prepare bpf_link to be exposed to user-space by allocating anon_inode file,
* reserving unused FD and allocating ID from link_idr. This is to be paired
* with bpf_link_settle() to install FD and ID and expose bpf_link to
* user-space, if bpf_link is successfully attached. If not, bpf_link and
* pre-allocated resources are to be freed with bpf_link_cleanup() call. All the
* transient state is passed around in struct bpf_link_primer.
* This is the preferred way to create and initialize bpf_link, especially when
* there are complicated and expensive operations in between creating bpf_link
* itself and attaching it to BPF hook. By using bpf_link_prime() and
* bpf_link_settle() kernel code using bpf_link doesn't have to perform
* expensive (and potentially failing) roll back operations in a rare case
* that file, FD, or ID can't be allocated.
*/
struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd)
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer)
{
struct file *file;
int fd;
int fd, id;

fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
return ERR_PTR(fd);
return fd;

id = bpf_link_alloc_id(link);
if (id < 0) {
put_unused_fd(fd);
return id;
}

file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC);
if (IS_ERR(file)) {
bpf_link_free_id(id);
put_unused_fd(fd);
return file;
return PTR_ERR(file);
}

*reserved_fd = fd;
return file;
primer->link = link;
primer->file = file;
primer->fd = fd;
primer->id = id;
return 0;
}

int bpf_link_settle(struct bpf_link_primer *primer)
{
/* make bpf_link fetchable by ID */
spin_lock_bh(&link_idr_lock);
primer->link->id = primer->id;
spin_unlock_bh(&link_idr_lock);
/* make bpf_link fetchable by FD */
fd_install(primer->fd, primer->file);
/* pass through installed FD */
return primer->fd;
}

int bpf_link_new_fd(struct bpf_link *link)
{
return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC);
}

struct bpf_link *bpf_link_get_from_fd(u32 ufd)
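The prime/settle split means only the middle step of an attach can fail: the file, FD and ID are allocated up front, and publishing them is a no-fail operation. A new attach path would follow the same shape as the call sites converted in this series (sketch; the example_* names are hypothetical):

static int example_link_attach(struct bpf_prog *prog)
{
	struct bpf_link_primer primer;
	struct example_link *link;
	int err;

	link = kzalloc(sizeof(*link), GFP_USER);
	if (!link)
		return -ENOMEM;
	bpf_link_init(&link->link, BPF_LINK_TYPE_UNSPEC /* real type here */,
		      &example_link_lops, prog);

	err = bpf_link_prime(&link->link, &primer);
	if (err) {
		/* nothing is exposed yet, a plain kfree() is enough */
		kfree(link);
		return err;
	}

	err = example_hook_attach(link);	/* the part that may fail */
	if (err) {
		bpf_link_cleanup(&primer);	/* unwinds file, FD and ID */
		return err;
	}

	return bpf_link_settle(&primer);	/* no-fail: publish FD and ID */
}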
@ -2345,6 +2407,7 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd)

struct bpf_tracing_link {
struct bpf_link link;
enum bpf_attach_type attach_type;
};

static void bpf_tracing_link_release(struct bpf_link *link)

@ -2360,16 +2423,40 @@ static void bpf_tracing_link_dealloc(struct bpf_link *link)
kfree(tr_link);
}

static void bpf_tracing_link_show_fdinfo(const struct bpf_link *link,
struct seq_file *seq)
{
struct bpf_tracing_link *tr_link =
container_of(link, struct bpf_tracing_link, link);

seq_printf(seq,
"attach_type:\t%d\n",
tr_link->attach_type);
}

static int bpf_tracing_link_fill_link_info(const struct bpf_link *link,
struct bpf_link_info *info)
{
struct bpf_tracing_link *tr_link =
container_of(link, struct bpf_tracing_link, link);

info->tracing.attach_type = tr_link->attach_type;

return 0;
}

static const struct bpf_link_ops bpf_tracing_link_lops = {
.release = bpf_tracing_link_release,
.dealloc = bpf_tracing_link_dealloc,
.show_fdinfo = bpf_tracing_link_show_fdinfo,
.fill_link_info = bpf_tracing_link_fill_link_info,
};

static int bpf_tracing_prog_attach(struct bpf_prog *prog)
{
struct bpf_link_primer link_primer;
struct bpf_tracing_link *link;
struct file *link_file;
int link_fd, err;
int err;

switch (prog->type) {
case BPF_PROG_TYPE_TRACING:

@ -2402,24 +2489,23 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog)
err = -ENOMEM;
goto out_put_prog;
}
bpf_link_init(&link->link, &bpf_tracing_link_lops, prog);
bpf_link_init(&link->link, BPF_LINK_TYPE_TRACING,
&bpf_tracing_link_lops, prog);
link->attach_type = prog->expected_attach_type;

link_file = bpf_link_new_file(&link->link, &link_fd);
if (IS_ERR(link_file)) {
err = bpf_link_prime(&link->link, &link_primer);
if (err) {
kfree(link);
err = PTR_ERR(link_file);
goto out_put_prog;
}

err = bpf_trampoline_link_prog(prog);
if (err) {
bpf_link_cleanup(&link->link, link_file, link_fd);
bpf_link_cleanup(&link_primer);
goto out_put_prog;
}

fd_install(link_fd, link_file);
return link_fd;

return bpf_link_settle(&link_primer);
out_put_prog:
bpf_prog_put(prog);
return err;
@ -2447,22 +2533,69 @@ static void bpf_raw_tp_link_dealloc(struct bpf_link *link)
kfree(raw_tp);
}

static const struct bpf_link_ops bpf_raw_tp_lops = {
static void bpf_raw_tp_link_show_fdinfo(const struct bpf_link *link,
struct seq_file *seq)
{
struct bpf_raw_tp_link *raw_tp_link =
container_of(link, struct bpf_raw_tp_link, link);

seq_printf(seq,
"tp_name:\t%s\n",
raw_tp_link->btp->tp->name);
}

static int bpf_raw_tp_link_fill_link_info(const struct bpf_link *link,
struct bpf_link_info *info)
{
struct bpf_raw_tp_link *raw_tp_link =
container_of(link, struct bpf_raw_tp_link, link);
char __user *ubuf = u64_to_user_ptr(info->raw_tracepoint.tp_name);
const char *tp_name = raw_tp_link->btp->tp->name;
u32 ulen = info->raw_tracepoint.tp_name_len;
size_t tp_len = strlen(tp_name);

if (ulen && !ubuf)
return -EINVAL;

info->raw_tracepoint.tp_name_len = tp_len + 1;

if (!ubuf)
return 0;

if (ulen >= tp_len + 1) {
if (copy_to_user(ubuf, tp_name, tp_len + 1))
return -EFAULT;
} else {
char zero = '\0';

if (copy_to_user(ubuf, tp_name, ulen - 1))
return -EFAULT;
if (put_user(zero, ubuf + ulen - 1))
return -EFAULT;
return -ENOSPC;
}

return 0;
}

static const struct bpf_link_ops bpf_raw_tp_link_lops = {
.release = bpf_raw_tp_link_release,
.dealloc = bpf_raw_tp_link_dealloc,
.show_fdinfo = bpf_raw_tp_link_show_fdinfo,
.fill_link_info = bpf_raw_tp_link_fill_link_info,
};

#define BPF_RAW_TRACEPOINT_OPEN_LAST_FIELD raw_tracepoint.prog_fd

static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
{
struct bpf_link_primer link_primer;
struct bpf_raw_tp_link *link;
struct bpf_raw_event_map *btp;
struct file *link_file;
struct bpf_prog *prog;
const char *tp_name;
char buf[128];
int link_fd, err;
int err;

if (CHECK_ATTR(BPF_RAW_TRACEPOINT_OPEN))
return -EINVAL;

@ -2515,24 +2648,23 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
err = -ENOMEM;
goto out_put_btp;
}
bpf_link_init(&link->link, &bpf_raw_tp_lops, prog);
bpf_link_init(&link->link, BPF_LINK_TYPE_RAW_TRACEPOINT,
&bpf_raw_tp_link_lops, prog);
link->btp = btp;

link_file = bpf_link_new_file(&link->link, &link_fd);
if (IS_ERR(link_file)) {
err = bpf_link_prime(&link->link, &link_primer);
if (err) {
kfree(link);
err = PTR_ERR(link_file);
goto out_put_btp;
}

err = bpf_probe_register(link->btp, prog);
if (err) {
bpf_link_cleanup(&link->link, link_file, link_fd);
bpf_link_cleanup(&link_primer);
goto out_put_btp;
}

fd_install(link_fd, link_file);
return link_fd;
return bpf_link_settle(&link_primer);

out_put_btp:
bpf_put_raw_tracepoint(btp);
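bpf_raw_tp_link_fill_link_info() implements a two-pass contract: it always reports the required size in tp_name_len, copies a NUL-terminated (possibly truncated) name, and returns -ENOSPC on truncation. Userspace can therefore size the buffer in two calls (sketch, error handling trimmed):

static int get_tp_name(int link_fd, char *buf, __u32 buf_len)
{
	struct bpf_link_info info = {};
	union bpf_attr attr;

	info.raw_tracepoint.tp_name = (__u64)(unsigned long)buf;
	info.raw_tracepoint.tp_name_len = buf_len;

	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = link_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	if (syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)))
		/* on -ENOSPC, retry with info.raw_tracepoint.tp_name_len bytes */
		return -1;
	return 0;
}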
@ -3313,6 +3445,42 @@ static int bpf_btf_get_info_by_fd(struct btf *btf,
return btf_get_info_by_fd(btf, attr, uattr);
}

static int bpf_link_get_info_by_fd(struct bpf_link *link,
const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
struct bpf_link_info __user *uinfo = u64_to_user_ptr(attr->info.info);
struct bpf_link_info info;
u32 info_len = attr->info.info_len;
int err;

err = bpf_check_uarg_tail_zero(uinfo, sizeof(info), info_len);
if (err)
return err;
info_len = min_t(u32, sizeof(info), info_len);

memset(&info, 0, sizeof(info));
if (copy_from_user(&info, uinfo, info_len))
return -EFAULT;

info.type = link->type;
info.id = link->id;
info.prog_id = link->prog->aux->id;

if (link->ops->fill_link_info) {
err = link->ops->fill_link_info(link, &info);
if (err)
return err;
}

if (copy_to_user(uinfo, &info, info_len) ||
put_user(info_len, &uattr->info.info_len))
return -EFAULT;

return 0;
}

#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info

static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,

@ -3337,6 +3505,9 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
uattr);
else if (f.file->f_op == &btf_fops)
err = bpf_btf_get_info_by_fd(f.file->private_data, attr, uattr);
else if (f.file->f_op == &bpf_link_fops)
err = bpf_link_get_info_by_fd(f.file->private_data,
attr, uattr);
else
err = -EINVAL;

@ -3464,7 +3635,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr,
if (file->f_op == &bpf_link_fops) {
struct bpf_link *link = file->private_data;

if (link->ops == &bpf_raw_tp_lops) {
if (link->ops == &bpf_raw_tp_link_lops) {
struct bpf_raw_tp_link *raw_tp =
container_of(link, struct bpf_raw_tp_link, link);
struct bpf_raw_event_map *btp = raw_tp->btp;

@ -3645,13 +3816,10 @@ static int link_update(union bpf_attr *attr)
goto out_put_progs;
}

#ifdef CONFIG_CGROUP_BPF
if (link->ops == &bpf_cgroup_link_lops) {
ret = cgroup_bpf_replace(link, old_prog, new_prog);
goto out_put_progs;
}
#endif
ret = -EINVAL;
if (link->ops->update_prog)
ret = link->ops->update_prog(link, new_prog, old_prog);
else
ret = -EINVAL;

out_put_progs:
if (old_prog)

@ -3663,6 +3831,102 @@ static int link_update(union bpf_attr *attr)
return ret;
}

static int bpf_link_inc_not_zero(struct bpf_link *link)
{
return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? 0 : -ENOENT;
}

#define BPF_LINK_GET_FD_BY_ID_LAST_FIELD link_id

static int bpf_link_get_fd_by_id(const union bpf_attr *attr)
{
struct bpf_link *link;
u32 id = attr->link_id;
int fd, err;

if (CHECK_ATTR(BPF_LINK_GET_FD_BY_ID))
return -EINVAL;

if (!capable(CAP_SYS_ADMIN))
return -EPERM;

spin_lock_bh(&link_idr_lock);
link = idr_find(&link_idr, id);
/* before link is "settled", ID is 0, pretend it doesn't exist yet */
if (link) {
if (link->id)
err = bpf_link_inc_not_zero(link);
else
err = -EAGAIN;
} else {
err = -ENOENT;
}
spin_unlock_bh(&link_idr_lock);

if (err)
return err;

fd = bpf_link_new_fd(link);
if (fd < 0)
bpf_link_put(link);

return fd;
}

DEFINE_MUTEX(bpf_stats_enabled_mutex);

static int bpf_stats_release(struct inode *inode, struct file *file)
{
mutex_lock(&bpf_stats_enabled_mutex);
static_key_slow_dec(&bpf_stats_enabled_key.key);
mutex_unlock(&bpf_stats_enabled_mutex);
return 0;
}

static const struct file_operations bpf_stats_fops = {
.release = bpf_stats_release,
};

static int bpf_enable_runtime_stats(void)
{
int fd;

mutex_lock(&bpf_stats_enabled_mutex);

/* Set a very high limit to avoid overflow */
if (static_key_count(&bpf_stats_enabled_key.key) > INT_MAX / 2) {
mutex_unlock(&bpf_stats_enabled_mutex);
return -EBUSY;
}

fd = anon_inode_getfd("bpf-stats", &bpf_stats_fops, NULL, O_CLOEXEC);
if (fd >= 0)
static_key_slow_inc(&bpf_stats_enabled_key.key);

mutex_unlock(&bpf_stats_enabled_mutex);
return fd;
}

#define BPF_ENABLE_STATS_LAST_FIELD enable_stats.type

static int bpf_enable_stats(union bpf_attr *attr)
{

if (CHECK_ATTR(BPF_ENABLE_STATS))
return -EINVAL;

if (!capable(CAP_SYS_ADMIN))
return -EPERM;

switch (attr->enable_stats.type) {
case BPF_STATS_RUN_TIME:
return bpf_enable_runtime_stats();
default:
break;
}
return -EINVAL;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
union bpf_attr attr;
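Together, BPF_LINK_GET_NEXT_ID and BPF_LINK_GET_FD_BY_ID make links enumerable the same way programs and maps already are. An iteration sketch in the style of a bpftool loop (CAP_SYS_ADMIN required; error handling trimmed):

static void for_each_link(void (*cb)(int link_fd))
{
	union bpf_attr attr;
	__u32 id = 0;
	int fd;

	for (;;) {
		memset(&attr, 0, sizeof(attr));
		attr.start_id = id;
		if (syscall(__NR_bpf, BPF_LINK_GET_NEXT_ID, &attr, sizeof(attr)))
			break;			/* -ENOENT: done */
		id = attr.next_id;

		memset(&attr, 0, sizeof(attr));
		attr.link_id = id;
		fd = syscall(__NR_bpf, BPF_LINK_GET_FD_BY_ID, &attr, sizeof(attr));
		if (fd < 0)
			continue;		/* link went away, or not settled yet */
		cb(fd);
		close(fd);
	}
}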
@ -3780,6 +4044,16 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
case BPF_LINK_UPDATE:
err = link_update(&attr);
break;
case BPF_LINK_GET_FD_BY_ID:
err = bpf_link_get_fd_by_id(&attr);
break;
case BPF_LINK_GET_NEXT_ID:
err = bpf_obj_get_next_id(&attr, uattr,
&link_idr, &link_idr_lock);
break;
case BPF_ENABLE_STATS:
err = bpf_enable_stats(&attr);
break;
default:
err = -EINVAL;
break;
@ -28,9 +28,11 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
[_id] = & _name ## _verifier_ops,
#define BPF_MAP_TYPE(_id, _ops)
#define BPF_LINK_TYPE(_id, _name)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
#undef BPF_LINK_TYPE
};

/* bpf_check() is a static code analyzer that walks eBPF program

@ -168,6 +170,8 @@ struct bpf_verifier_stack_elem {
int insn_idx;
int prev_insn_idx;
struct bpf_verifier_stack_elem *next;
/* length of verifier log at the time this state was pushed on stack */
u32 log_pos;
};

#define BPF_COMPLEXITY_LIMIT_JMP_SEQ 8192

@ -283,6 +287,18 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
log->ubuf = NULL;
}

static void bpf_vlog_reset(struct bpf_verifier_log *log, u32 new_pos)
{
char zero = 0;

if (!bpf_verifier_log_needed(log))
return;

log->len_used = new_pos;
if (put_user(zero, log->ubuf + new_pos))
log->ubuf = NULL;
}

/* log_level controls verbosity level of eBPF verifier.
* bpf_verifier_log_write() is used to dump the verification trace to the log,
* so the user can figure out what's wrong with the program

@ -413,11 +429,30 @@ static bool is_release_function(enum bpf_func_id func_id)
return func_id == BPF_FUNC_sk_release;
}

static bool is_acquire_function(enum bpf_func_id func_id)
static bool may_be_acquire_function(enum bpf_func_id func_id)
{
return func_id == BPF_FUNC_sk_lookup_tcp ||
func_id == BPF_FUNC_sk_lookup_udp ||
func_id == BPF_FUNC_skc_lookup_tcp;
func_id == BPF_FUNC_skc_lookup_tcp ||
func_id == BPF_FUNC_map_lookup_elem;
}

static bool is_acquire_function(enum bpf_func_id func_id,
const struct bpf_map *map)
{
enum bpf_map_type map_type = map ? map->map_type : BPF_MAP_TYPE_UNSPEC;

if (func_id == BPF_FUNC_sk_lookup_tcp ||
func_id == BPF_FUNC_sk_lookup_udp ||
func_id == BPF_FUNC_skc_lookup_tcp)
return true;

if (func_id == BPF_FUNC_map_lookup_elem &&
(map_type == BPF_MAP_TYPE_SOCKMAP ||
map_type == BPF_MAP_TYPE_SOCKHASH))
return true;

return false;
}

static bool is_ptr_cast_function(enum bpf_func_id func_id)
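From the program side, these verifier changes make bpf_map_lookup_elem() on a SOCKMAP or SOCKHASH return a referenced socket pointer that must be released on every path. A sketch (BTF-defined map; section and verdict values as used by sk_skb programs):

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u64);
} sock_map SEC(".maps");

SEC("sk_skb/stream_verdict")
int check_peer(struct __sk_buff *skb)
{
	struct bpf_sock *sk;
	__u32 key = 0;

	sk = bpf_map_lookup_elem(&sock_map, &key);
	if (!sk)
		return SK_DROP;
	/* referenced socket: the verifier rejects the program if this
	 * bpf_sk_release() is missing on any path */
	bpf_sk_release(sk);
	return SK_PASS;
}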
@ -846,7 +881,7 @@ static void update_branch_counts(struct bpf_verifier_env *env, struct bpf_verifi
}

static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
int *insn_idx)
int *insn_idx, bool pop_log)
{
struct bpf_verifier_state *cur = env->cur_state;
struct bpf_verifier_stack_elem *elem, *head = env->head;

@ -860,6 +895,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
if (err)
return err;
}
if (pop_log)
bpf_vlog_reset(&env->log, head->log_pos);
if (insn_idx)
*insn_idx = head->insn_idx;
if (prev_insn_idx)

@ -887,6 +924,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
elem->insn_idx = insn_idx;
elem->prev_insn_idx = prev_insn_idx;
elem->next = env->head;
elem->log_pos = env->log.len_used;
env->head = elem;
env->stack_size++;
err = copy_verifier_state(&elem->st, cur);

@ -915,7 +953,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
/* pop all elements and return */
while (!pop_stack(env, NULL, NULL));
while (!pop_stack(env, NULL, NULL, false));
return NULL;
}

@ -3915,7 +3953,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_sock_map_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_map &&
func_id != BPF_FUNC_sk_select_reuseport)
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
case BPF_MAP_TYPE_SOCKHASH:

@ -3923,7 +3962,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
func_id != BPF_FUNC_sock_hash_update &&
func_id != BPF_FUNC_map_delete_elem &&
func_id != BPF_FUNC_msg_redirect_hash &&
func_id != BPF_FUNC_sk_select_reuseport)
func_id != BPF_FUNC_sk_select_reuseport &&
func_id != BPF_FUNC_map_lookup_elem)
goto error;
break;
case BPF_MAP_TYPE_REUSEPORT_SOCKARRAY:

@ -4093,7 +4133,7 @@ static bool check_refcount_ok(const struct bpf_func_proto *fn, int func_id)
/* A reference acquiring function cannot acquire
* another refcounted ptr.
*/
if (is_acquire_function(func_id) && count)
if (may_be_acquire_function(func_id) && count)
return false;

/* We only support one arg being unreferenced at the moment,

@ -4604,7 +4644,7 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
if (is_ptr_cast_function(func_id)) {
/* For release_reference() */
regs[BPF_REG_0].ref_obj_id = meta.ref_obj_id;
} else if (is_acquire_function(func_id)) {
} else if (is_acquire_function(func_id, meta.map_ptr)) {
int id = acquire_reference_state(env, insn_idx);

if (id < 0)

@ -5609,7 +5649,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
{
struct bpf_reg_state *regs = cur_regs(env);
u8 opcode = BPF_OP(insn->code);
bool src_known, dst_known;
bool src_known;
s64 smin_val, smax_val;
u64 umin_val, umax_val;
s32 s32_min_val, s32_max_val;

@ -5631,7 +5671,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,

if (alu32) {
src_known = tnum_subreg_is_const(src_reg.var_off);
dst_known = tnum_subreg_is_const(dst_reg->var_off);
if ((src_known &&
(s32_min_val != s32_max_val || u32_min_val != u32_max_val)) ||
s32_min_val > s32_max_val || u32_min_val > u32_max_val) {

@ -5643,7 +5682,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
}
} else {
src_known = tnum_is_const(src_reg.var_off);
dst_known = tnum_is_const(dst_reg->var_off);
if ((src_known &&
(smin_val != smax_val || umin_val != umax_val)) ||
smin_val > smax_val || umin_val > umax_val) {

@ -6515,12 +6553,16 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state,
if (is_null) {
reg->type = SCALAR_VALUE;
} else if (reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
if (reg->map_ptr->inner_map_meta) {
const struct bpf_map *map = reg->map_ptr;

if (map->inner_map_meta) {
reg->type = CONST_PTR_TO_MAP;
reg->map_ptr = reg->map_ptr->inner_map_meta;
} else if (reg->map_ptr->map_type ==
BPF_MAP_TYPE_XSKMAP) {
reg->map_ptr = map->inner_map_meta;
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
reg->type = PTR_TO_XDP_SOCK;
} else if (map->map_type == BPF_MAP_TYPE_SOCKMAP ||
map->map_type == BPF_MAP_TYPE_SOCKHASH) {
reg->type = PTR_TO_SOCKET;
} else {
reg->type = PTR_TO_MAP_VALUE;
}

@ -8409,6 +8451,7 @@ static bool reg_type_mismatch(enum bpf_reg_type src, enum bpf_reg_type prev)

static int do_check(struct bpf_verifier_env *env)
{
bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
struct bpf_verifier_state *state = env->cur_state;
struct bpf_insn *insns = env->prog->insnsi;
struct bpf_reg_state *regs;

@ -8685,7 +8728,7 @@ static int do_check(struct bpf_verifier_env *env)
process_bpf_exit:
update_branch_counts(env, env->cur_state);
err = pop_stack(env, &prev_insn_idx,
&env->insn_idx);
&env->insn_idx, pop_log);
if (err < 0) {
if (err != -ENOENT)
return err;

@ -10208,6 +10251,7 @@ static void sanitize_insn_aux_data(struct bpf_verifier_env *env)

static int do_check_common(struct bpf_verifier_env *env, int subprog)
{
bool pop_log = !(env->log.level & BPF_LOG_LEVEL2);
struct bpf_verifier_state *state;
struct bpf_reg_state *regs;
int ret, i;

@ -10270,7 +10314,9 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
free_verifier_state(env->cur_state, true);
env->cur_state = NULL;
}
while (!pop_stack(env, NULL, NULL));
while (!pop_stack(env, NULL, NULL, false));
if (!ret && pop_log)
bpf_vlog_reset(&env->log, 0);
free_states(env);
if (ret)
/* clean aux data in case subprog was rejected */
@ -6508,33 +6508,6 @@ int cgroup_bpf_attach(struct cgroup *cgrp,
return ret;
}

int cgroup_bpf_replace(struct bpf_link *link, struct bpf_prog *old_prog,
struct bpf_prog *new_prog)
{
struct bpf_cgroup_link *cg_link;
int ret;

if (link->ops != &bpf_cgroup_link_lops)
return -EINVAL;

cg_link = container_of(link, struct bpf_cgroup_link, link);

mutex_lock(&cgroup_mutex);
/* link might have been auto-released by dying cgroup, so fail */
if (!cg_link->cgroup) {
ret = -EINVAL;
goto out_unlock;
}
if (old_prog && link->prog != old_prog) {
ret = -EPERM;
goto out_unlock;
}
ret = __cgroup_bpf_replace(cg_link->cgroup, cg_link, new_prog);
out_unlock:
mutex_unlock(&cgroup_mutex);
return ret;
}

int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type)
{
@ -236,7 +236,7 @@ get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
|
|||
* sysctl_perf_event_max_contexts_per_stack.
|
||||
*/
|
||||
int perf_event_max_stack_handler(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
void *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
int *value = table->data;
|
||||
int new_value = *value, ret;
|
||||
|
|
|
@ -437,8 +437,7 @@ static void update_perf_cpu_limits(void)
|
|||
static bool perf_rotate_context(struct perf_cpu_context *cpuctx);
|
||||
|
||||
int perf_proc_update_handler(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp,
|
||||
loff_t *ppos)
|
||||
void *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
int ret;
|
||||
int perf_cpu = sysctl_perf_cpu_time_max_percent;
|
||||
|
@ -462,8 +461,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
|
|||
int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
|
||||
|
||||
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp,
|
||||
loff_t *ppos)
|
||||
void *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
|
||||
|
||||
|
|
|
@ -892,7 +892,7 @@ static void unoptimize_all_kprobes(void)
|
|||
static DEFINE_MUTEX(kprobe_sysctl_mutex);
|
||||
int sysctl_kprobes_optimization;
|
||||
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *length,
|
||||
void *buffer, size_t *length,
|
||||
loff_t *ppos)
|
||||
{
|
||||
int ret;
|
||||
|
|
|
@ -269,8 +269,8 @@ static int __init init_lstats_procfs(void)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int sysctl_latencytop(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
|
||||
size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
int err;
|
||||
|
||||
|
|
|
@ -263,7 +263,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
|
|||
|
||||
#ifdef CONFIG_CHECKPOINT_RESTORE
|
||||
static int pid_ns_ctl_handler(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
void *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
struct pid_namespace *pid_ns = task_active_pid_ns(current);
|
||||
struct ctl_table tmp = *table;
|
||||
|
|
|
@ -173,7 +173,7 @@ __setup("printk.devkmsg=", control_devkmsg);
|
|||
char devkmsg_log_str[DEVKMSG_STR_MAX_SIZE] = "ratelimit";
|
||||
|
||||
int devkmsg_sysctl_set_loglvl(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
void *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
char old_str[DEVKMSG_STR_MAX_SIZE];
|
||||
unsigned int old;
|
||||
|
|
|
@ -1110,8 +1110,7 @@ static void uclamp_update_root_tg(void) { }
|
|||
#endif
|
||||
|
||||
int sysctl_sched_uclamp_handler(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp,
|
||||
loff_t *ppos)
|
||||
void *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
bool update_root_tg = false;
|
||||
int old_min, old_max;
|
||||
|
@ -2718,7 +2717,7 @@ void set_numabalancing_state(bool enabled)
|
|||
|
||||
#ifdef CONFIG_PROC_SYSCTL
|
||||
int sysctl_numa_balancing(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
void *buffer, size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
struct ctl_table t;
|
||||
int err;
|
||||
|
@ -2792,8 +2791,8 @@ static void __init init_schedstats(void)
|
|||
}
|
||||
|
||||
#ifdef CONFIG_PROC_SYSCTL
|
||||
int sysctl_schedstats(struct ctl_table *table, int write,
|
||||
void __user *buffer, size_t *lenp, loff_t *ppos)
|
||||
int sysctl_schedstats(struct ctl_table *table, int write, void *buffer,
|
||||
size_t *lenp, loff_t *ppos)
|
||||
{
|
||||
struct ctl_table t;
|
||||
int err;
|
||||
|
|
|
@@ -645,8 +645,7 @@ struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq)
  */
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
+		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
 	unsigned int factor = get_update_sysctl_factor();

@@ -2714,9 +2714,8 @@ static void sched_rt_do_global(void)
 	def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period());
 }
 
-int sched_rt_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
+int sched_rt_handler(struct ctl_table *table, int write, void *buffer,
+		size_t *lenp, loff_t *ppos)
 {
 	int old_period, old_runtime;
 	static DEFINE_MUTEX(mutex);
@@ -2754,9 +2753,8 @@ int sched_rt_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-int sched_rr_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
+int sched_rr_handler(struct ctl_table *table, int write, void *buffer,
+		size_t *lenp, loff_t *ppos)
 {
 	int ret;
 	static DEFINE_MUTEX(mutex);

@@ -209,7 +209,7 @@ bool sched_energy_update;
 
 #ifdef CONFIG_PROC_SYSCTL
 int sched_energy_aware_handler(struct ctl_table *table, int write,
-			void __user *buffer, size_t *lenp, loff_t *ppos)
+			void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret, state;
 

@@ -1776,7 +1776,7 @@ static void audit_actions_logged(u32 actions_logged, u32 old_actions_logged,
 }
 
 static int seccomp_actions_logged_handler(struct ctl_table *ro_table, int write,
-					  void __user *buffer, size_t *lenp,
+					  void *buffer, size_t *lenp,
 					  loff_t *ppos)
 {
 	int ret;

kernel/sysctl.c (3063 lines changed; diff suppressed because it is too large)

@@ -249,8 +249,7 @@ void timers_update_nohz(void)
 }
 
 int timer_migration_handler(struct ctl_table *table, int write,
-			    void __user *buffer, size_t *lenp,
-			    loff_t *ppos)
+			    void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 

@@ -797,6 +797,8 @@ bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_map_peek_elem_proto;
 	case BPF_FUNC_ktime_get_ns:
 		return &bpf_ktime_get_ns_proto;
+	case BPF_FUNC_ktime_get_boot_ns:
+		return &bpf_ktime_get_boot_ns_proto;
 	case BPF_FUNC_tail_call:
 		return &bpf_tail_call_proto;
 	case BPF_FUNC_get_current_pid_tgid:

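The new BPF_FUNC_ktime_get_boot_ns case above exposes Maciej's bpf_ktime_get_boot_ns() helper (CLOCK_BOOTTIME, which keeps counting across suspend) to tracing programs. A hedged sketch of a caller; the tracepoint and program shape are illustrative only:

	// Illustrative tracing program; needs a kernel with this merge.
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("tracepoint/syscalls/sys_enter_nanosleep")
	int compare_clocks(void *ctx)
	{
		__u64 mono = bpf_ktime_get_ns();      /* stops during suspend */
		__u64 boot = bpf_ktime_get_boot_ns(); /* includes suspend time */

		bpf_printk("mono=%llu boot=%llu\n", mono, boot);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";
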
@@ -2661,7 +2661,7 @@ static void output_printk(struct trace_event_buffer *fbuffer)
 }
 
 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
-			     void __user *buffer, size_t *lenp,
+			     void *buffer, size_t *lenp,
 			     loff_t *ppos)
 {
 	int save_tracepoint_printk;

@@ -630,7 +630,7 @@ int call_usermodehelper(const char *path, char **argv, char **envp, int wait)
 EXPORT_SYMBOL(call_usermodehelper);
 
 static int proc_cap_handler(struct ctl_table *table, int write,
-			 void __user *buffer, size_t *lenp, loff_t *ppos)
+			 void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table t;
 	unsigned long cap_array[_KERNEL_CAPABILITY_U32S];

@@ -30,7 +30,7 @@ static void *get_uts(struct ctl_table *table)
  * to observe. Should this be in kernel/sys.c ????
  */
 static int proc_do_uts_string(struct ctl_table *table, int write,
-		  void __user *buffer, size_t *lenp, loff_t *ppos)
+		  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table uts_table;
 	int r;

@@ -661,7 +661,7 @@ static void proc_watchdog_update(void)
 * proc_soft_watchdog | soft_watchdog_user_enabled | SOFT_WATCHDOG_ENABLED
 */
 static int proc_watchdog_common(int which, struct ctl_table *table, int write,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int err, old, *param = table->data;
 
@@ -688,7 +688,7 @@ static int proc_watchdog_common(int which, struct ctl_table *table, int write,
 * /proc/sys/kernel/watchdog
 */
 int proc_watchdog(struct ctl_table *table, int write,
-		  void __user *buffer, size_t *lenp, loff_t *ppos)
+		  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	return proc_watchdog_common(NMI_WATCHDOG_ENABLED|SOFT_WATCHDOG_ENABLED,
 				    table, write, buffer, lenp, ppos);
@@ -698,7 +698,7 @@ int proc_watchdog(struct ctl_table *table, int write,
 * /proc/sys/kernel/nmi_watchdog
 */
 int proc_nmi_watchdog(struct ctl_table *table, int write,
-		      void __user *buffer, size_t *lenp, loff_t *ppos)
+		      void *buffer, size_t *lenp, loff_t *ppos)
 {
 	if (!nmi_watchdog_available && write)
 		return -ENOTSUPP;
@@ -710,7 +710,7 @@ int proc_nmi_watchdog(struct ctl_table *table, int write,
 * /proc/sys/kernel/soft_watchdog
 */
 int proc_soft_watchdog(struct ctl_table *table, int write,
-		       void __user *buffer, size_t *lenp, loff_t *ppos)
+		       void *buffer, size_t *lenp, loff_t *ppos)
 {
 	return proc_watchdog_common(SOFT_WATCHDOG_ENABLED,
 				    table, write, buffer, lenp, ppos);
@@ -720,7 +720,7 @@ int proc_soft_watchdog(struct ctl_table *table, int write,
 * /proc/sys/kernel/watchdog_thresh
 */
 int proc_watchdog_thresh(struct ctl_table *table, int write,
-			 void __user *buffer, size_t *lenp, loff_t *ppos)
+			 void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int err, old;
 
@@ -743,7 +743,7 @@ int proc_watchdog_thresh(struct ctl_table *table, int write,
 * been brought online, if desired.
 */
 int proc_watchdog_cpumask(struct ctl_table *table, int write,
-			  void __user *buffer, size_t *lenp, loff_t *ppos)
+			  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int err;
 

@@ -2463,7 +2463,7 @@ int sysctl_compact_memory;
 * /proc/sys/vm/compact_memory
 */
 int sysctl_compaction_handler(struct ctl_table *table, int write,
-			void __user *buffer, size_t *length, loff_t *ppos)
+			void *buffer, size_t *length, loff_t *ppos)
 {
 	if (write)
 		compact_nodes();

@@ -3352,7 +3352,7 @@ static unsigned int cpuset_mems_nr(unsigned int *array)
 #ifdef CONFIG_SYSCTL
 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 			 struct ctl_table *table, int write,
-			 void __user *buffer, size_t *length, loff_t *ppos)
+			 void *buffer, size_t *length, loff_t *ppos)
 {
 	struct hstate *h = &default_hstate;
 	unsigned long tmp = h->max_huge_pages;
@@ -3375,7 +3375,7 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
 }
 
 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
-			  void __user *buffer, size_t *length, loff_t *ppos)
+			  void *buffer, size_t *length, loff_t *ppos)
 {
 
 	return hugetlb_sysctl_handler_common(false, table, write,
@@ -3384,7 +3384,7 @@ int hugetlb_sysctl_handler(struct ctl_table *table, int write,
 
 #ifdef CONFIG_NUMA
 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
-			  void __user *buffer, size_t *length, loff_t *ppos)
+			  void *buffer, size_t *length, loff_t *ppos)
 {
 	return hugetlb_sysctl_handler_common(true, table, write,
 					     buffer, length, ppos);
@@ -3392,8 +3392,7 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
 #endif /* CONFIG_NUMA */
 
 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
-			void __user *buffer,
-			size_t *length, loff_t *ppos)
+			void *buffer, size_t *length, loff_t *ppos)
 {
 	struct hstate *h = &default_hstate;
 	unsigned long tmp;

@@ -512,8 +512,7 @@ bool node_dirty_ok(struct pglist_data *pgdat)
 }
 
 int dirty_background_ratio_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
+		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 
@@ -524,8 +523,7 @@ int dirty_background_ratio_handler(struct ctl_table *table, int write,
 }
 
 int dirty_background_bytes_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
+		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 
@@ -535,9 +533,8 @@ int dirty_background_bytes_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-int dirty_ratio_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
+int dirty_ratio_handler(struct ctl_table *table, int write, void *buffer,
+		size_t *lenp, loff_t *ppos)
 {
 	int old_ratio = vm_dirty_ratio;
 	int ret;
@@ -551,8 +548,7 @@ int dirty_ratio_handler(struct ctl_table *table, int write,
 }
 
 int dirty_bytes_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *lenp,
-		loff_t *ppos)
+		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	unsigned long old_bytes = vm_dirty_bytes;
 	int ret;
@@ -1972,7 +1968,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
 int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+	void *buffer, size_t *length, loff_t *ppos)
 {
 	unsigned int old_interval = dirty_writeback_interval;
 	int ret;

@@ -5546,21 +5546,11 @@ char numa_zonelist_order[] = "Node";
 * sysctl handler for numa_zonelist_order
 */
 int numa_zonelist_order_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length,
-		loff_t *ppos)
+		void *buffer, size_t *length, loff_t *ppos)
 {
-	char *str;
-	int ret;
-
-	if (!write)
-		return proc_dostring(table, write, buffer, length, ppos);
-	str = memdup_user_nul(buffer, 16);
-	if (IS_ERR(str))
-		return PTR_ERR(str);
-
-	ret = __parse_numa_zonelist_order(str);
-	kfree(str);
-	return ret;
+	if (write)
+		return __parse_numa_zonelist_order(buffer);
+	return proc_dostring(table, write, buffer, length, ppos);
 }
 
 
@@ -7963,7 +7953,7 @@ core_initcall(init_per_zone_wmark_min)
 * changes.
 */
 int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+	void *buffer, size_t *length, loff_t *ppos)
 {
 	int rc;
 
@@ -7978,20 +7968,8 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
 	return 0;
 }
 
-int watermark_boost_factor_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
-{
-	int rc;
-
-	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
-	if (rc)
-		return rc;
-
-	return 0;
-}
-
 int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+	void *buffer, size_t *length, loff_t *ppos)
 {
 	int rc;
 
@@ -8021,7 +7999,7 @@ static void setup_min_unmapped_ratio(void)
 
 
 int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+	void *buffer, size_t *length, loff_t *ppos)
 {
 	int rc;
 
@@ -8048,7 +8026,7 @@ static void setup_min_slab_ratio(void)
 }
 
 int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+	void *buffer, size_t *length, loff_t *ppos)
 {
 	int rc;
 
@@ -8072,7 +8050,7 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
 * if in function of the boot time zone sizes.
 */
 int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+	void *buffer, size_t *length, loff_t *ppos)
 {
 	proc_dointvec_minmax(table, write, buffer, length, ppos);
 	setup_per_zone_lowmem_reserve();
@@ -8094,7 +8072,7 @@ static void __zone_pcp_update(struct zone *zone)
 * pagelist can have before it gets flushed back to buddy allocator.
 */
 int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
-	void __user *buffer, size_t *length, loff_t *ppos)
+	void *buffer, size_t *length, loff_t *ppos)
 {
 	struct zone *zone;
 	int old_percpu_pagelist_fraction;

mm/util.c (10 lines changed)
@@ -717,9 +717,8 @@ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
 
-int overcommit_ratio_handler(struct ctl_table *table, int write,
-			     void __user *buffer, size_t *lenp,
-			     loff_t *ppos)
+int overcommit_ratio_handler(struct ctl_table *table, int write, void *buffer,
+			     size_t *lenp, loff_t *ppos)
 {
 	int ret;
 
@@ -729,9 +728,8 @@ int overcommit_ratio_handler(struct ctl_table *table, int write,
 	return ret;
 }
 
-int overcommit_kbytes_handler(struct ctl_table *table, int write,
-			      void __user *buffer, size_t *lenp,
-			      loff_t *ppos)
+int overcommit_kbytes_handler(struct ctl_table *table, int write, void *buffer,
+			      size_t *lenp, loff_t *ppos)
 {
 	int ret;
 

@@ -76,7 +76,7 @@ static void invalid_numa_statistics(void)
 static DEFINE_MUTEX(vm_numa_stat_lock);
 
 int sysctl_vm_numa_stat_handler(struct ctl_table *table, int write,
-		void __user *buffer, size_t *length, loff_t *ppos)
+		void *buffer, size_t *length, loff_t *ppos)
 {
 	int ret, oldval;
 
@@ -1751,7 +1751,7 @@ static void refresh_vm_stats(struct work_struct *work)
 }
 
 int vmstat_refresh(struct ctl_table *table, int write,
-		   void __user *buffer, size_t *lenp, loff_t *ppos)
+		   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	long val;
 	int err;

@@ -1027,7 +1027,7 @@ int br_nf_hook_thresh(unsigned int hook, struct net *net,
 #ifdef CONFIG_SYSCTL
 static
 int brnf_sysctl_call_tables(struct ctl_table *ctl, int write,
-			    void __user *buffer, size_t *lenp, loff_t *ppos)
+			    void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 

@@ -256,17 +256,6 @@ BPF_CALL_2(bpf_skb_load_helper_32_no_cache, const struct sk_buff *, skb,
 					  offset);
 }
 
-BPF_CALL_0(bpf_get_raw_cpu_id)
-{
-	return raw_smp_processor_id();
-}
-
-static const struct bpf_func_proto bpf_get_raw_smp_processor_id_proto = {
-	.func		= bpf_get_raw_cpu_id,
-	.gpl_only	= false,
-	.ret_type	= RET_INTEGER,
-};
-
 static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 			      struct bpf_insn *insn_buf)
 {

|
|||
.arg1_type = ARG_PTR_TO_CTX,
|
||||
};
|
||||
|
||||
BPF_CALL_5(bpf_event_output_data, void *, ctx, struct bpf_map *, map, u64, flags,
|
||||
void *, data, u64, size)
|
||||
#define SOCKOPT_CC_REINIT (1 << 0)
|
||||
|
||||
static int _bpf_setsockopt(struct sock *sk, int level, int optname,
|
||||
char *optval, int optlen, u32 flags)
|
||||
{
|
||||
if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
|
||||
return -EINVAL;
|
||||
|
||||
return bpf_event_output(map, flags, data, size, NULL, 0, NULL);
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_event_output_data_proto = {
|
||||
.func = bpf_event_output_data,
|
||||
.gpl_only = true,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_CTX,
|
||||
.arg2_type = ARG_CONST_MAP_PTR,
|
||||
.arg3_type = ARG_ANYTHING,
|
||||
.arg4_type = ARG_PTR_TO_MEM,
|
||||
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
|
||||
};
|
||||
|
||||
BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
|
||||
int, level, int, optname, char *, optval, int, optlen)
|
||||
{
|
||||
struct sock *sk = bpf_sock->sk;
|
||||
int ret = 0;
|
||||
int val;
|
||||
|
||||
if (!sk_fullsock(sk))
|
||||
return -EINVAL;
|
||||
|
||||
sock_owned_by_me(sk);
|
||||
|
||||
if (level == SOL_SOCKET) {
|
||||
if (optlen != sizeof(int))
|
||||
return -EINVAL;
|
||||
|
@ -4329,7 +4301,7 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
|
|||
sk->sk_prot->setsockopt == tcp_setsockopt) {
|
||||
if (optname == TCP_CONGESTION) {
|
||||
char name[TCP_CA_NAME_MAX];
|
||||
bool reinit = bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN;
|
||||
bool reinit = flags & SOCKOPT_CC_REINIT;
|
||||
|
||||
strncpy(name, optval, min_t(long, optlen,
|
||||
TCP_CA_NAME_MAX-1));
|
||||
|
@ -4376,24 +4348,14 @@ BPF_CALL_5(bpf_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
|
|||
return ret;
|
||||
}
|
||||
|
||||
static const struct bpf_func_proto bpf_setsockopt_proto = {
|
||||
.func = bpf_setsockopt,
|
||||
.gpl_only = false,
|
||||
.ret_type = RET_INTEGER,
|
||||
.arg1_type = ARG_PTR_TO_CTX,
|
||||
.arg2_type = ARG_ANYTHING,
|
||||
.arg3_type = ARG_ANYTHING,
|
||||
.arg4_type = ARG_PTR_TO_MEM,
|
||||
.arg5_type = ARG_CONST_SIZE,
|
||||
};
|
||||
|
||||
BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
|
||||
int, level, int, optname, char *, optval, int, optlen)
|
||||
static int _bpf_getsockopt(struct sock *sk, int level, int optname,
|
||||
char *optval, int optlen)
|
||||
{
|
||||
struct sock *sk = bpf_sock->sk;
|
||||
|
||||
if (!sk_fullsock(sk))
|
||||
goto err_clear;
|
||||
|
||||
sock_owned_by_me(sk);
|
||||
|
||||
#ifdef CONFIG_INET
|
||||
if (level == SOL_TCP && sk->sk_prot->getsockopt == tcp_getsockopt) {
|
||||
struct inet_connection_sock *icsk;
|
||||
|
@@ -4459,8 +4421,71 @@ BPF_CALL_5(bpf_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
 		return -EINVAL;
 }
 
-static const struct bpf_func_proto bpf_getsockopt_proto = {
-	.func		= bpf_getsockopt,
+BPF_CALL_5(bpf_sock_addr_setsockopt, struct bpf_sock_addr_kern *, ctx,
+	   int, level, int, optname, char *, optval, int, optlen)
+{
+	u32 flags = 0;
+	return _bpf_setsockopt(ctx->sk, level, optname, optval, optlen,
+			       flags);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_setsockopt_proto = {
+	.func		= bpf_sock_addr_setsockopt,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_MEM,
+	.arg5_type	= ARG_CONST_SIZE,
+};
+
+BPF_CALL_5(bpf_sock_addr_getsockopt, struct bpf_sock_addr_kern *, ctx,
+	   int, level, int, optname, char *, optval, int, optlen)
+{
+	return _bpf_getsockopt(ctx->sk, level, optname, optval, optlen);
+}
+
+static const struct bpf_func_proto bpf_sock_addr_getsockopt_proto = {
+	.func		= bpf_sock_addr_getsockopt,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg5_type	= ARG_CONST_SIZE,
+};
+
+BPF_CALL_5(bpf_sock_ops_setsockopt, struct bpf_sock_ops_kern *, bpf_sock,
+	   int, level, int, optname, char *, optval, int, optlen)
+{
+	u32 flags = 0;
+	if (bpf_sock->op > BPF_SOCK_OPS_NEEDS_ECN)
+		flags |= SOCKOPT_CC_REINIT;
+	return _bpf_setsockopt(bpf_sock->sk, level, optname, optval, optlen,
+			       flags);
+}
+
+static const struct bpf_func_proto bpf_sock_ops_setsockopt_proto = {
+	.func		= bpf_sock_ops_setsockopt,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_ANYTHING,
+	.arg4_type	= ARG_PTR_TO_MEM,
+	.arg5_type	= ARG_CONST_SIZE,
+};
+
+BPF_CALL_5(bpf_sock_ops_getsockopt, struct bpf_sock_ops_kern *, bpf_sock,
+	   int, level, int, optname, char *, optval, int, optlen)
+{
+	return _bpf_getsockopt(bpf_sock->sk, level, optname, optval, optlen);
+}
+
+static const struct bpf_func_proto bpf_sock_ops_getsockopt_proto = {
+	.func		= bpf_sock_ops_getsockopt,
 	.gpl_only	= false,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,

@@ -5983,52 +6008,7 @@ bool bpf_helper_changes_pkt_data(void *func)
 	return false;
 }
 
-const struct bpf_func_proto *
-bpf_base_func_proto(enum bpf_func_id func_id)
-{
-	switch (func_id) {
-	case BPF_FUNC_map_lookup_elem:
-		return &bpf_map_lookup_elem_proto;
-	case BPF_FUNC_map_update_elem:
-		return &bpf_map_update_elem_proto;
-	case BPF_FUNC_map_delete_elem:
-		return &bpf_map_delete_elem_proto;
-	case BPF_FUNC_map_push_elem:
-		return &bpf_map_push_elem_proto;
-	case BPF_FUNC_map_pop_elem:
-		return &bpf_map_pop_elem_proto;
-	case BPF_FUNC_map_peek_elem:
-		return &bpf_map_peek_elem_proto;
-	case BPF_FUNC_get_prandom_u32:
-		return &bpf_get_prandom_u32_proto;
-	case BPF_FUNC_get_smp_processor_id:
-		return &bpf_get_raw_smp_processor_id_proto;
-	case BPF_FUNC_get_numa_node_id:
-		return &bpf_get_numa_node_id_proto;
-	case BPF_FUNC_tail_call:
-		return &bpf_tail_call_proto;
-	case BPF_FUNC_ktime_get_ns:
-		return &bpf_ktime_get_ns_proto;
-	default:
-		break;
-	}
-
-	if (!capable(CAP_SYS_ADMIN))
-		return NULL;
-
-	switch (func_id) {
-	case BPF_FUNC_spin_lock:
-		return &bpf_spin_lock_proto;
-	case BPF_FUNC_spin_unlock:
-		return &bpf_spin_unlock_proto;
-	case BPF_FUNC_trace_printk:
-		return bpf_get_trace_printk_proto();
-	case BPF_FUNC_jiffies64:
-		return &bpf_jiffies64_proto;
-	default:
-		return NULL;
-	}
-}
+const struct bpf_func_proto bpf_event_output_data_proto __weak;
 
 static const struct bpf_func_proto *
 sock_filter_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)

@@ -6119,6 +6099,22 @@ sock_addr_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_sk_storage_get_proto;
 	case BPF_FUNC_sk_storage_delete:
 		return &bpf_sk_storage_delete_proto;
+	case BPF_FUNC_setsockopt:
+		switch (prog->expected_attach_type) {
+		case BPF_CGROUP_INET4_CONNECT:
+		case BPF_CGROUP_INET6_CONNECT:
+			return &bpf_sock_addr_setsockopt_proto;
+		default:
+			return NULL;
+		}
+	case BPF_FUNC_getsockopt:
+		switch (prog->expected_attach_type) {
+		case BPF_CGROUP_INET4_CONNECT:
+		case BPF_CGROUP_INET6_CONNECT:
+			return &bpf_sock_addr_getsockopt_proto;
+		default:
+			return NULL;
+		}
 	default:
 		return bpf_base_func_proto(func_id);
 	}

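The two new cases above are what let BPF_CGROUP_INET4_CONNECT and BPF_CGROUP_INET6_CONNECT programs call bpf_setsockopt()/bpf_getsockopt() (Stanislav's series). A rough sketch of a connect4 program using it; the SO_MARK value and section name are illustrative, and the fallback #defines assume the usual asm-generic values:

	// Illustrative cgroup/connect4 program; assumes a kernel with this merge.
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	#ifndef SOL_SOCKET
	#define SOL_SOCKET 1
	#endif
	#ifndef SO_MARK
	#define SO_MARK 36	/* asm-generic/socket.h value on most arches */
	#endif

	SEC("cgroup/connect4")
	int set_mark_on_connect(struct bpf_sock_addr *ctx)
	{
		int mark = 42;	/* arbitrary example value */

		/* Newly permitted for the INET4/INET6 connect attach types. */
		bpf_setsockopt(ctx, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
		return 1;	/* allow the connect() to proceed */
	}

	char LICENSE[] SEC("license") = "GPL";
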
@@ -6213,6 +6209,8 @@ tc_cls_act_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_skb_adjust_room_proto;
 	case BPF_FUNC_skb_change_tail:
 		return &bpf_skb_change_tail_proto;
+	case BPF_FUNC_skb_change_head:
+		return &bpf_skb_change_head_proto;
 	case BPF_FUNC_skb_get_tunnel_key:
 		return &bpf_skb_get_tunnel_key_proto;
 	case BPF_FUNC_skb_set_tunnel_key:

@@ -6335,9 +6333,9 @@ sock_ops_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
 	switch (func_id) {
 	case BPF_FUNC_setsockopt:
-		return &bpf_setsockopt_proto;
+		return &bpf_sock_ops_setsockopt_proto;
 	case BPF_FUNC_getsockopt:
-		return &bpf_getsockopt_proto;
+		return &bpf_sock_ops_getsockopt_proto;
 	case BPF_FUNC_sock_ops_cb_flags_set:
 		return &bpf_sock_ops_cb_flags_set_proto;
 	case BPF_FUNC_sock_map_update:

@@ -8786,6 +8784,10 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
 
 	reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
 	if (!reuse) {
+		/* Lookup in sock_map can return TCP ESTABLISHED sockets. */
+		if (sk_is_refcounted(selected_sk))
+			sock_put(selected_sk);
+
 		/* reuseport_array has only sk with non NULL sk_reuseport_cb.
 		 * The only (!reuse) case here is - the sk has already been
 		 * unhashed (e.g. by close()), so treat it as -ENOENT.

@@ -3379,7 +3379,7 @@ EXPORT_SYMBOL(neigh_app_ns);
 static int unres_qlen_max = INT_MAX / SKB_TRUESIZE(ETH_FRAME_LEN);
 
 static int proc_unres_qlen(struct ctl_table *ctl, int write,
-			   void __user *buffer, size_t *lenp, loff_t *ppos)
+			   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int size, ret;
 	struct ctl_table tmp = *ctl;
@@ -3443,8 +3443,8 @@ static void neigh_proc_update(struct ctl_table *ctl, int write)
 }
 
 static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
-					   void __user *buffer,
-					   size_t *lenp, loff_t *ppos)
+					   void *buffer, size_t *lenp,
+					   loff_t *ppos)
 {
 	struct ctl_table tmp = *ctl;
 	int ret;
@@ -3457,8 +3457,8 @@ static int neigh_proc_dointvec_zero_intmax(struct ctl_table *ctl, int write,
 	return ret;
 }
 
-int neigh_proc_dointvec(struct ctl_table *ctl, int write,
-			void __user *buffer, size_t *lenp, loff_t *ppos)
+int neigh_proc_dointvec(struct ctl_table *ctl, int write, void *buffer,
+			size_t *lenp, loff_t *ppos)
 {
 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
 
@@ -3467,8 +3467,7 @@ int neigh_proc_dointvec(struct ctl_table *ctl, int write,
 }
 EXPORT_SYMBOL(neigh_proc_dointvec);
 
-int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
-				void __user *buffer,
+int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write, void *buffer,
 				size_t *lenp, loff_t *ppos)
 {
 	int ret = proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
@@ -3479,8 +3478,8 @@ int neigh_proc_dointvec_jiffies(struct ctl_table *ctl, int write,
 EXPORT_SYMBOL(neigh_proc_dointvec_jiffies);
 
 static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
-					      void __user *buffer,
-					      size_t *lenp, loff_t *ppos)
+					      void *buffer, size_t *lenp,
+					      loff_t *ppos)
 {
 	int ret = proc_dointvec_userhz_jiffies(ctl, write, buffer, lenp, ppos);
 
@@ -3489,8 +3488,7 @@ static int neigh_proc_dointvec_userhz_jiffies(struct ctl_table *ctl, int write,
 }
 
 int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
-				   void __user *buffer,
-				   size_t *lenp, loff_t *ppos)
+				   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret = proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
 
@@ -3500,8 +3498,8 @@ int neigh_proc_dointvec_ms_jiffies(struct ctl_table *ctl, int write,
 EXPORT_SYMBOL(neigh_proc_dointvec_ms_jiffies);
 
 static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
-					  void __user *buffer,
-					  size_t *lenp, loff_t *ppos)
+					  void *buffer, size_t *lenp,
+					  loff_t *ppos)
 {
 	int ret = proc_unres_qlen(ctl, write, buffer, lenp, ppos);
 
@@ -3510,8 +3508,8 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
 }
 
 static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
-					  void __user *buffer,
-					  size_t *lenp, loff_t *ppos)
+					  void *buffer, size_t *lenp,
+					  loff_t *ppos)
 {
 	struct neigh_parms *p = ctl->extra2;
 	int ret;

@@ -343,7 +343,14 @@ static struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
 
 static void *sock_map_lookup(struct bpf_map *map, void *key)
 {
-	return __sock_map_lookup_elem(map, *(u32 *)key);
+	struct sock *sk;
+
+	sk = __sock_map_lookup_elem(map, *(u32 *)key);
+	if (!sk || !sk_fullsock(sk))
+		return NULL;
+	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
+		return NULL;
+	return sk;
 }
 
 static void *sock_map_lookup_sys(struct bpf_map *map, void *key)
@@ -1051,7 +1058,14 @@ static void *sock_hash_lookup_sys(struct bpf_map *map, void *key)
 
 static void *sock_hash_lookup(struct bpf_map *map, void *key)
 {
-	return __sock_hash_lookup_elem(map, key);
+	struct sock *sk;
+
+	sk = __sock_hash_lookup_elem(map, key);
+	if (!sk || !sk_fullsock(sk))
+		return NULL;
+	if (sk_is_refcounted(sk) && !refcount_inc_not_zero(&sk->sk_refcnt))
+		return NULL;
+	return sk;
 }
 
 static void sock_hash_release_progs(struct bpf_map *map)

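The sock_map/sock_hash lookup changes above back Jakub's item 5: bpf_map_lookup_elem() now works on SOCKMAP and SOCKHASH, with fullsock and refcount checks so a program is never handed a half-dead socket. A hedged sketch of a consumer; the map layout, section name, and program type are illustrative, and whether the verifier accepts a given program type should be checked against the selftests:

	// Illustrative: probing a SOCKMAP slot from a TC classifier.
	#include <linux/bpf.h>
	#include <linux/pkt_cls.h>
	#include <bpf/bpf_helpers.h>

	struct {
		__uint(type, BPF_MAP_TYPE_SOCKMAP);
		__uint(max_entries, 16);
		__type(key, __u32);
		__type(value, __u64);
	} sock_map SEC(".maps");

	SEC("classifier")
	int check_slot(struct __sk_buff *skb)
	{
		__u32 key = 0;
		struct bpf_sock *sk;

		sk = bpf_map_lookup_elem(&sock_map, &key);
		if (sk)
			bpf_sk_release(sk);	/* the lookup took a reference */
		return TC_ACT_OK;
	}

	char LICENSE[] SEC("license") = "GPL";
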
@@ -45,7 +45,7 @@ EXPORT_SYMBOL(sysctl_devconf_inherit_init_net);
 
 #ifdef CONFIG_RPS
 static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	unsigned int orig_size, size;
 	int ret, i;
@@ -115,8 +115,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
 static DEFINE_MUTEX(flow_limit_update_mutex);
 
 static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
-				 void __user *buffer, size_t *lenp,
-				 loff_t *ppos)
+				 void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct sd_flow_limit *cur;
 	struct softnet_data *sd;
@@ -180,10 +179,7 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
 		}
 		if (len < *lenp)
 			kbuf[len++] = '\n';
-		if (copy_to_user(buffer, kbuf, len)) {
-			ret = -EFAULT;
-			goto done;
-		}
+		memcpy(buffer, kbuf, len);
 		*lenp = len;
 		*ppos += len;
 	}
@@ -194,8 +190,7 @@ static int flow_limit_cpu_sysctl(struct ctl_table *table, int write,
 }
 
 static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
-				       void __user *buffer, size_t *lenp,
-				       loff_t *ppos)
+				       void *buffer, size_t *lenp, loff_t *ppos)
 {
 	unsigned int old, *ptr;
 	int ret;
@@ -217,7 +212,7 @@ static int flow_limit_table_len_sysctl(struct ctl_table *table, int write,
 
 #ifdef CONFIG_NET_SCHED
 static int set_default_qdisc(struct ctl_table *table, int write,
-			     void __user *buffer, size_t *lenp, loff_t *ppos)
+			     void *buffer, size_t *lenp, loff_t *ppos)
 {
 	char id[IFNAMSIZ];
 	struct ctl_table tbl = {
@@ -236,7 +231,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
 #endif
 
 static int proc_do_dev_weight(struct ctl_table *table, int write,
-			   void __user *buffer, size_t *lenp, loff_t *ppos)
+			   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 
@@ -251,7 +246,7 @@ static int proc_do_dev_weight(struct ctl_table *table, int write,
 }
 
 static int proc_do_rss_key(struct ctl_table *table, int write,
-			   void __user *buffer, size_t *lenp, loff_t *ppos)
+			   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table fake_table;
 	char buf[NETDEV_RSS_KEY_LEN * 3];
@@ -264,7 +259,7 @@ static int proc_do_rss_key(struct ctl_table *table, int write,
 
 #ifdef CONFIG_BPF_JIT
 static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
-					   void __user *buffer, size_t *lenp,
+					   void *buffer, size_t *lenp,
 					   loff_t *ppos)
 {
 	int ret, jit_enable = *(int *)table->data;
@@ -291,8 +286,7 @@ static int proc_dointvec_minmax_bpf_enable(struct ctl_table *table, int write,
 # ifdef CONFIG_HAVE_EBPF_JIT
 static int
 proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
-				    void __user *buffer, size_t *lenp,
-				    loff_t *ppos)
+				    void *buffer, size_t *lenp, loff_t *ppos)
 {
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;
@@ -303,8 +297,7 @@ proc_dointvec_minmax_bpf_restricted(struct ctl_table *table, int write,
 
 static int
 proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write,
-				     void __user *buffer, size_t *lenp,
-				     loff_t *ppos)
+				     void *buffer, size_t *lenp, loff_t *ppos)
 {
 	if (!capable(CAP_SYS_ADMIN))
 		return -EPERM;

@@ -160,8 +160,8 @@ static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MULT */
 static int min_priority[1];
 static int max_priority[] = { 127 }; /* From DECnet spec */
 
-static int dn_forwarding_proc(struct ctl_table *, int,
-			void __user *, size_t *, loff_t *);
+static int dn_forwarding_proc(struct ctl_table *, int, void *, size_t *,
+		loff_t *);
 static struct dn_dev_sysctl_table {
 	struct ctl_table_header *sysctl_header;
 	struct ctl_table dn_dev_vars[5];
@@ -245,8 +245,7 @@ static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
 }
 
 static int dn_forwarding_proc(struct ctl_table *table, int write,
-				void __user *buffer,
-				size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 #ifdef CONFIG_DECNET_ROUTER
 	struct net_device *dev = table->extra1;

@@ -134,8 +134,7 @@ static int parse_addr(__le16 *addr, char *str)
 }
 
 static int dn_node_address_handler(struct ctl_table *table, int write,
-				void __user *buffer,
-				size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	char addr[DN_ASCBUF_LEN];
 	size_t len;
@@ -148,10 +147,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
 
 	if (write) {
 		len = (*lenp < DN_ASCBUF_LEN) ? *lenp : (DN_ASCBUF_LEN-1);
-
-		if (copy_from_user(addr, buffer, len))
-			return -EFAULT;
-
+		memcpy(addr, buffer, len);
 		addr[len] = 0;
 		strip_it(addr);
 
@@ -173,11 +169,9 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
 	len = strlen(addr);
 	addr[len++] = '\n';
 
-	if (len > *lenp) len = *lenp;
-
-	if (copy_to_user(buffer, addr, len))
-		return -EFAULT;
-
+	if (len > *lenp)
+		len = *lenp;
+	memcpy(buffer, addr, len);
 	*lenp = len;
 	*ppos += len;
 
@@ -185,8 +179,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
 }
 
 static int dn_def_dev_handler(struct ctl_table *table, int write,
-				void __user *buffer,
-				size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	size_t len;
 	struct net_device *dev;
@@ -201,9 +194,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
 		if (*lenp > 16)
 			return -E2BIG;
 
-		if (copy_from_user(devname, buffer, *lenp))
-			return -EFAULT;
-
+		memcpy(devname, buffer, *lenp);
 		devname[*lenp] = 0;
 		strip_it(devname);
 
@@ -238,9 +229,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
 
 	if (len > *lenp) len = *lenp;
 
-	if (copy_to_user(buffer, devname, len))
-		return -EFAULT;
-
+	memcpy(buffer, devname, len);
 	*lenp = len;
 	*ppos += len;
 

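The DECnet hunks above show the second half of the conversion: handlers that did their own copy_{from,to}_user() on the proc buffer now plain memcpy(), because the caller already staged the data in kernel memory. The general shape, as a sketch with made-up names:

	/* Sketch of a string-valued handler under the kernel-buffer rules. */
	static int demo_string_handler(struct ctl_table *table, int write,
				       void *buffer, size_t *lenp, loff_t *ppos)
	{
		char tmp[16];
		size_t len;

		if (write) {
			len = min(*lenp, sizeof(tmp) - 1);
			memcpy(tmp, buffer, len);	/* was copy_from_user() */
			tmp[len] = '\0';
			/* ... parse tmp ... */
			*ppos += *lenp;
			return 0;
		}

		len = scnprintf(tmp, sizeof(tmp), "demo\n");
		if (len > *lenp)
			len = *lenp;
		memcpy(buffer, tmp, len);		/* was copy_to_user() */
		*lenp = len;
		*ppos += len;
		return 0;
	}
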
@@ -2366,8 +2366,7 @@ static int devinet_conf_ifindex(struct net *net, struct ipv4_devconf *cnf)
 }
 
 static int devinet_conf_proc(struct ctl_table *ctl, int write,
-			     void __user *buffer,
-			     size_t *lenp, loff_t *ppos)
+			     void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int old_value = *(int *)ctl->data;
 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
@@ -2419,8 +2418,7 @@ static int devinet_conf_proc(struct ctl_table *ctl, int write,
 }
 
 static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
-				  void __user *buffer,
-				  size_t *lenp, loff_t *ppos)
+				  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;
@@ -2463,8 +2461,7 @@ static int devinet_sysctl_forward(struct ctl_table *ctl, int write,
 }
 
 static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
-				void __user *buffer,
-				size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;

@@ -3336,8 +3336,7 @@ static int ip_rt_gc_elasticity __read_mostly = 8;
 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
 
 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
-					void __user *buffer,
-					size_t *lenp, loff_t *ppos)
+					void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net = (struct net *)__ctl->extra1;
 

@@ -71,8 +71,7 @@ static void set_local_port_range(struct net *net, int range[2])
 
 /* Validate changes from /proc interface. */
 static int ipv4_local_port_range(struct ctl_table *table, int write,
-				 void __user *buffer,
-				 size_t *lenp, loff_t *ppos)
+				 void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net =
 		container_of(table->data, struct net, ipv4.ip_local_ports.range);
@@ -107,7 +106,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
 
 /* Validate changes from /proc interface. */
 static int ipv4_privileged_ports(struct ctl_table *table, int write,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net = container_of(table->data, struct net,
 	    ipv4.sysctl_ip_prot_sock);
@@ -168,8 +167,7 @@ static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high)
 
 /* Validate changes from /proc interface. */
 static int ipv4_ping_group_range(struct ctl_table *table, int write,
-				 void __user *buffer,
-				 size_t *lenp, loff_t *ppos)
+				 void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct user_namespace *user_ns = current_user_ns();
 	int ret;
@@ -204,8 +202,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
 }
 
 static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
-				    void __user *buffer,
-				    size_t *lenp, loff_t *ppos)
+				    void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net;
 	int ret;
@@ -221,7 +218,7 @@ static int ipv4_fwd_update_priority(struct ctl_table *table, int write,
 }
 
 static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
-				       void __user *buffer, size_t *lenp, loff_t *ppos)
+				       void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net = container_of(ctl->data, struct net,
 				       ipv4.tcp_congestion_control);
@@ -241,9 +238,8 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
 }
 
 static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
-						 int write,
-						 void __user *buffer, size_t *lenp,
-						 loff_t *ppos)
+						 int write, void *buffer,
+						 size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
 	int ret;
@@ -258,9 +254,8 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
 }
 
 static int proc_allowed_congestion_control(struct ctl_table *ctl,
-					   int write,
-					   void __user *buffer, size_t *lenp,
-					   loff_t *ppos)
+					   int write, void *buffer,
+					   size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
 	int ret;
@@ -296,8 +291,7 @@ static int sscanf_key(char *buf, __le32 *key)
 }
 
 static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
-				 void __user *buffer, size_t *lenp,
-				 loff_t *ppos)
+				 void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net = container_of(table->data, struct net,
 	    ipv4.sysctl_tcp_fastopen);
@@ -399,7 +393,7 @@ static void proc_configure_early_demux(int enabled, int protocol)
 }
 
 static int proc_tcp_early_demux(struct ctl_table *table, int write,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret = 0;
 
@@ -415,7 +409,7 @@ static int proc_tcp_early_demux(struct ctl_table *table, int write,
 }
 
 static int proc_udp_early_demux(struct ctl_table *table, int write,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret = 0;
 
@@ -431,8 +425,7 @@ static int proc_udp_early_demux(struct ctl_table *table, int write,
 }
 
 static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
-					     int write,
-					     void __user *buffer,
+					     int write, void *buffer,
 					     size_t *lenp, loff_t *ppos)
 {
 	struct net *net = container_of(table->data, struct net,
@@ -447,8 +440,7 @@ static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
 }
 
 static int proc_tcp_available_ulp(struct ctl_table *ctl,
-				  int write,
-				  void __user *buffer, size_t *lenp,
+				  int write, void *buffer, size_t *lenp,
 				  loff_t *ppos)
 {
 	struct ctl_table tbl = { .maxlen = TCP_ULP_BUF_MAX, };
@@ -466,7 +458,7 @@ static int proc_tcp_available_ulp(struct ctl_table *ctl,
 
 #ifdef CONFIG_IP_ROUTE_MULTIPATH
 static int proc_fib_multipath_hash_policy(struct ctl_table *table, int write,
-					  void __user *buffer, size_t *lenp,
+					  void *buffer, size_t *lenp,
 					  loff_t *ppos)
 {
 	struct net *net = container_of(table->data, struct net,

@@ -6095,9 +6095,8 @@ static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
 
 #ifdef CONFIG_SYSCTL
 
-static
-int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
-			    void __user *buffer, size_t *lenp, loff_t *ppos)
+static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
+		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;
@@ -6121,9 +6120,8 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
 	return ret;
 }
 
-static
-int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
-			void __user *buffer, size_t *lenp, loff_t *ppos)
+static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
+		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct inet6_dev *idev = ctl->extra1;
 	int min_mtu = IPV6_MIN_MTU;
@@ -6193,9 +6191,8 @@ static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
 	return 0;
 }
 
-static
-int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
-			    void __user *buffer, size_t *lenp, loff_t *ppos)
+static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
+		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;
@@ -6219,9 +6216,8 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
 	return ret;
 }
 
-static
-int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
-			      void __user *buffer, size_t *lenp, loff_t *ppos)
+static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
+		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int ret;
@@ -6262,7 +6258,7 @@ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
 }
 
 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
-					 void __user *buffer, size_t *lenp,
+					 void *buffer, size_t *lenp,
 					 loff_t *ppos)
 {
 	int ret = 0;
@@ -6324,7 +6320,7 @@ static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
 }
 
 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
-					 void __user *buffer, size_t *lenp,
+					 void *buffer, size_t *lenp,
 					 loff_t *ppos)
 {
 	int err;
@@ -6391,8 +6387,7 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
 
 static
 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
-						int write,
-						void __user *buffer,
+						int write, void *buffer,
 						size_t *lenp,
 						loff_t *ppos)
 {
@@ -6492,10 +6487,8 @@ int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
 	return 0;
 }
 
-static
-int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
-				   void __user *buffer, size_t *lenp,
-				   loff_t *ppos)
+static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
+				   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = ctl->data;
 	int val = *valp;

@@ -1835,7 +1835,8 @@ static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl,
 	}
 }
 
-int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
+int ndisc_ifinfo_sysctl_change(struct ctl_table *ctl, int write, void *buffer,
+		size_t *lenp, loff_t *ppos)
 {
 	struct net_device *dev = ctl->extra1;
 	struct inet6_dev *idev;

@@ -6092,9 +6092,8 @@ static int rt6_stats_seq_show(struct seq_file *seq, void *v)
 
 #ifdef CONFIG_SYSCTL
 
-static
-int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
-			      void __user *buffer, size_t *lenp, loff_t *ppos)
+static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
+		void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net;
 	int delay;

@@ -26,8 +26,7 @@ static int auto_flowlabels_min;
 static int auto_flowlabels_max = IP6_AUTO_FLOW_LABEL_MAX;
 
 static int proc_rt6_multipath_hash_policy(struct ctl_table *table, int write,
-					  void __user *buffer, size_t *lenp,
-					  loff_t *ppos)
+					  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net;
 	int ret;

@@ -1362,8 +1362,7 @@ static int mpls_netconf_dump_devconf(struct sk_buff *skb,
 	(&((struct mpls_dev *)0)->field)
 
 static int mpls_conf_proc(struct ctl_table *ctl, int write,
-			  void __user *buffer,
-			  size_t *lenp, loff_t *ppos)
+			  void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int oval = *(int *)ctl->data;
 	int ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
@@ -2594,7 +2593,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
 }
 
 static int mpls_platform_labels(struct ctl_table *table, int write,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct net *net = table->data;
 	int platform_labels = net->mpls.platform_labels;

@@ -1736,7 +1736,7 @@ static int three = 3;
 
 static int
 proc_do_defense_mode(struct ctl_table *table, int write,
-		     void __user *buffer, size_t *lenp, loff_t *ppos)
+		     void *buffer, size_t *lenp, loff_t *ppos)
 {
 	struct netns_ipvs *ipvs = table->extra2;
 	int *valp = table->data;
@@ -1763,7 +1763,7 @@ proc_do_defense_mode(struct ctl_table *table, int write,
 
 static int
 proc_do_sync_threshold(struct ctl_table *table, int write,
-		       void __user *buffer, size_t *lenp, loff_t *ppos)
+		       void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = table->data;
 	int val[2];
@@ -1788,7 +1788,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write,
 
 static int
 proc_do_sync_ports(struct ctl_table *table, int write,
-		   void __user *buffer, size_t *lenp, loff_t *ppos)
+		   void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int *valp = table->data;
 	int val = *valp;

@@ -519,7 +519,7 @@ static unsigned int nf_conntrack_htable_size_user __read_mostly;
 
 static int
 nf_conntrack_hash_sysctl(struct ctl_table *table, int write,
-			 void __user *buffer, size_t *lenp, loff_t *ppos)
+			 void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 

@@ -414,7 +414,7 @@ static struct ctl_table nf_log_sysctl_ftable[] = {
 };
 
 static int nf_log_proc_dostring(struct ctl_table *table, int write,
-				void __user *buffer, size_t *lenp, loff_t *ppos)
+				void *buffer, size_t *lenp, loff_t *ppos)
 {
 	const struct nf_logger *logger;
 	char buf[NFLOGGER_NAME_LEN];

@@ -49,8 +49,7 @@ void phonet_get_local_port_range(int *min, int *max)
 }
 
 static int proc_local_port_range(struct ctl_table *table, int write,
-				 void __user *buffer,
-				 size_t *lenp, loff_t *ppos)
+				 void *buffer, size_t *lenp, loff_t *ppos)
 {
 	int ret;
 	int range[2] = {local_port_range[0], local_port_range[1]};

@@ -62,8 +62,7 @@ static atomic_t rds_tcp_unloading = ATOMIC_INIT(0);
 static struct kmem_cache *rds_tcp_conn_slab;
 
 static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
-				 void __user *buffer, size_t *lenp,
-				 loff_t *fpos);
+				 void *buffer, size_t *lenp, loff_t *fpos);
 
 static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
 static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;
@@ -676,8 +675,7 @@ static void rds_tcp_sysctl_reset(struct net *net)
 }
 
 static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
-				 void __user *buffer, size_t *lenp,
-				 loff_t *fpos)
+				 void *buffer, size_t *lenp, loff_t *fpos)
 {
 	struct net *net = current->nsproxy->net_ns;
 	int err;

Some files were not shown because too many files have changed in this diff.