// SPDX-License-Identifier: GPL-2.0
/*
 * This is for all the tests related to copy_to_user() and copy_from_user()
 * hardening.
 */
#include "lkdtm.h"
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/sched/task_stack.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>

/*
 * Many of the tests here end up using const sizes, but those would
 * normally be ignored by hardened usercopy, so force the compiler
 * into choosing the non-const path to make sure we trigger the
 * hardened usercopy checks by adding "unconst" to all the const copies,
 * and making sure "cache_size" isn't optimized into a const.
 */
static volatile size_t unconst;
static volatile size_t cache_size = 1024;
static struct kmem_cache *whitelist_cache;
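
/*
 * For illustration (not wired into any test): with a compile-time-constant
 * size the compiler may resolve the usercopy object check statically, so
 * the runtime hardening path is never exercised. Reading the volatile
 * "unconst" hides the constant from the optimizer:
 *
 *	copy_to_user(ptr, buf, 32);		// may be checked at build time
 *	copy_to_user(ptr, buf, unconst + 32);	// forces the runtime check
 */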

static const unsigned char test_text[] = "This is a test.\n";

/*
 * Instead of adding -Wno-return-local-addr, just pass the stack address
 * through a function to obfuscate it from the compiler.
 */
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
	return stack + 0;
}

static noinline unsigned char *do_usercopy_stack_callee(int value)
{
	unsigned char buf[32];
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(buf); i++) {
		buf[i] = value & 0xff;
	}

	return trick_compiler(buf);
}

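/*
 * Copy to/from a user buffer using a stack address that hardened
 * usercopy should reject: with bad_frame, a pointer into the (now dead)
 * frame of do_usercopy_stack_callee(); without it, a pointer so close
 * to the end of the thread stack that the copy would run off of it.
 */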
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
	unsigned long user_addr;
	unsigned char good_stack[32];
	unsigned char *bad_stack;
	int i;

	/* Exercise stack to avoid everything living in registers. */
	for (i = 0; i < sizeof(good_stack); i++)
		good_stack[i] = test_text[i % sizeof(test_text)];

	if (bad_frame) {
		/* This is a pointer to outside our current stack frame. */
		bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
	} else {
		/*
		 * Put the start address just inside the stack, so that
		 * copying sizeof(good_stack) bytes runs off the end of it.
		 */
		bad_stack = task_stack_page(current) + THREAD_SIZE;
		bad_stack -= sizeof(unsigned long);
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	if (to_user) {
		pr_info("attempting good copy_to_user of local stack\n");
		if (copy_to_user((void __user *)user_addr, good_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of distant stack\n");
		if (copy_to_user((void __user *)user_addr, bad_stack,
				 unconst + sizeof(good_stack))) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		/*
		 * Without usercopy protection there is no safe way to
		 * perform the "beyond stack" write, since it would
		 * scribble over whatever lies past the end of the
		 * thread stack, so skip that variant.
		 */
		if (!bad_frame)
			goto free_user;

		pr_info("attempting good copy_from_user of local stack\n");
		if (copy_from_user(good_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of distant stack\n");
		if (copy_from_user(bad_stack, (void __user *)user_addr,
				   unconst + sizeof(good_stack))) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

/*
 * This checks for whole-object size validation with hardened usercopy,
 * with or without usercopy whitelisting.
 */
static void do_usercopy_heap_size(bool to_user)
{
	unsigned long user_addr;
	unsigned char *one, *two;
	void __user *test_user_addr;
	void *test_kern_addr;
	size_t size = unconst + 1024;

	one = kmalloc(size, GFP_KERNEL);
	two = kmalloc(size, GFP_KERNEL);
	if (!one || !two) {
		pr_warn("Failed to allocate kernel memory\n");
		goto free_kernel;
	}

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_kernel;
	}

	memset(one, 'A', size);
	memset(two, 'B', size);

	test_user_addr = (void __user *)(user_addr + 16);
	test_kern_addr = one + 16;
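	/*
	 * Starting 16 bytes into the 1024-byte object, a copy of size/2
	 * (512) bytes stays within the allocation, while a copy of the
	 * full "size" (1024) bytes overruns it by 16 bytes, which the
	 * whole-object check should reject.
	 */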

	if (to_user) {
		pr_info("attempting good copy_to_user of correct size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user of too large size\n");
		if (copy_to_user(test_user_addr, test_kern_addr, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user of correct size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user of too large size\n");
		if (copy_from_user(test_kern_addr, test_user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
	kfree(one);
	kfree(two);
}

/*
 * This checks for the specific whitelist window within an object. If this
 * test passes, then do_usercopy_heap_size() tests will pass too.
 */
static void do_usercopy_heap_whitelist(bool to_user)
{
	unsigned long user_alloc;
	unsigned char *buf = NULL;
	unsigned char __user *user_addr;
	size_t offset, size;

	/* Make sure cache was prepared. */
	if (!whitelist_cache) {
		pr_warn("Failed to allocate kernel cache\n");
		return;
	}

	/*
	 * Allocate a buffer with a whitelisted window in the buffer.
	 */
	buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL);
	if (!buf) {
		pr_warn("Failed to allocate buffer from whitelist cache\n");
		goto free_alloc;
	}

	/* Allocate user memory we'll poke at. */
	user_alloc = vm_mmap(NULL, 0, PAGE_SIZE,
			     PROT_READ | PROT_WRITE | PROT_EXEC,
			     MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_alloc >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		goto free_alloc;
	}
	user_addr = (void __user *)user_alloc;

	memset(buf, 'B', cache_size);

	/* Whitelisted window in buffer, from kmem_cache_create_usercopy. */
	offset = (cache_size / 4) + unconst;
	size = (cache_size / 16) + unconst;
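
	/*
	 * With the default cache_size of 1024, this window is bytes
	 * [256, 320) of the object, matching the useroffset/usersize
	 * arguments passed to kmem_cache_create_usercopy() in
	 * lkdtm_usercopy_init(). The "bad" copies below start at
	 * offset - 1, one byte before the window, so they should trip
	 * the whitelist check.
	 */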

	if (to_user) {
		pr_info("attempting good copy_to_user inside whitelist\n");
		if (copy_to_user(user_addr, buf + offset, size)) {
			pr_warn("copy_to_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_to_user outside whitelist\n");
		if (copy_to_user(user_addr, buf + offset - 1, size)) {
			pr_warn("copy_to_user failed, but lacked Oops\n");
			goto free_user;
		}
	} else {
		pr_info("attempting good copy_from_user inside whitelist\n");
		if (copy_from_user(buf + offset, user_addr, size)) {
			pr_warn("copy_from_user failed unexpectedly?!\n");
			goto free_user;
		}

		pr_info("attempting bad copy_from_user outside whitelist\n");
		if (copy_from_user(buf + offset - 1, user_addr, size)) {
			pr_warn("copy_from_user failed, but lacked Oops\n");
			goto free_user;
		}
	}

free_user:
	vm_munmap(user_alloc, PAGE_SIZE);
free_alloc:
	if (buf)
		kmem_cache_free(whitelist_cache, buf);
}

/* Callable tests. */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
	do_usercopy_heap_size(true);
}

void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
	do_usercopy_heap_size(false);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void)
{
	do_usercopy_heap_whitelist(true);
}

void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void)
{
	do_usercopy_heap_whitelist(false);
}

void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
	do_usercopy_stack(true, true);
}

void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
	do_usercopy_stack(false, true);
}

void lkdtm_USERCOPY_STACK_BEYOND(void)
{
	do_usercopy_stack(true, false);
}

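/*
 * Copy out of kernel rodata (which hardened usercopy permits) and then
 * out of kernel text (which it should reject with an Oops).
 */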
void lkdtm_USERCOPY_KERNEL(void)
{
	unsigned long user_addr;

	user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
			    PROT_READ | PROT_WRITE | PROT_EXEC,
			    MAP_ANONYMOUS | MAP_PRIVATE, 0);
	if (user_addr >= TASK_SIZE) {
		pr_warn("Failed to allocate user memory\n");
		return;
	}

	pr_info("attempting good copy_to_user from kernel rodata: %px\n",
		test_text);
	if (copy_to_user((void __user *)user_addr, test_text,
			 unconst + sizeof(test_text))) {
		pr_warn("copy_to_user failed unexpectedly?!\n");
		goto free_user;
	}

	pr_info("attempting bad copy_to_user from kernel text: %px\n",
		vm_mmap);
	if (copy_to_user((void __user *)user_addr, __va_function(vm_mmap),
			 unconst + PAGE_SIZE)) {
		pr_warn("copy_to_user failed, but lacked Oops\n");
		goto free_user;
	}
	pr_err("FAIL: survived bad copy_to_user()\n");

free_user:
	vm_munmap(user_addr, PAGE_SIZE);
}

void __init lkdtm_usercopy_init(void)
{
	/* Prepare a cache with a usercopy-whitelisted window. */
	whitelist_cache =
		kmem_cache_create_usercopy("lkdtm-usercopy", cache_size,
					   0, 0,
					   cache_size / 4,
					   cache_size / 16,
					   NULL);
}

void __exit lkdtm_usercopy_exit(void)
{
	kmem_cache_destroy(whitelist_cache);
}
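
/*
 * Usage sketch (an assumption about the standard LKDTM trigger setup,
 * not part of this file's logic): with CONFIG_LKDTM enabled and debugfs
 * mounted, each entry point above can be triggered by name, e.g.:
 *
 *	echo USERCOPY_HEAP_SIZE_TO > /sys/kernel/debug/provoke-crash/DIRECT
 *
 * With CONFIG_HARDENED_USERCOPY=y the "bad" copies should trigger an
 * Oops; without it they are likely to just succeed (USERCOPY_KERNEL
 * reports this explicitly with its "FAIL" message).
 */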