// SPDX-License-Identifier: GPL-2.0
/*
 * Test cases for the KFENCE memory safety error detector. Since the interface
 * with which KFENCE's reports are obtained is via the console, this is the
 * output we should verify. Each test case checks for the presence (or absence)
 * of generated reports. Relies on the 'console' tracepoint to capture reports
 * as they appear in the kernel log.
 *
 * Copyright (C) 2020, Google LLC.
 * Author: Alexander Potapenko <glider@google.com>
 *         Marco Elver <elver@google.com>
 */
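/* Typically built via CONFIG_KFENCE_KUNIT_TEST (built-in or as a module). */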

#include <kunit/test.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kfence.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

#include "kfence.h"

/* Report as observed from console. */
static struct {
	spinlock_t lock;
	int nlines;
	char lines[2][256];
} observed = {
	.lock = __SPIN_LOCK_UNLOCKED(observed.lock),
};
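/* lines[0] captures the report title, lines[1] the access-info line (see probe_console()). */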

/* Probe for console output: obtains observed lines of interest. */
static void probe_console(void *ignore, const char *buf, size_t len)
{
	unsigned long flags;
	int nlines;

	spin_lock_irqsave(&observed.lock, flags);
	nlines = observed.nlines;

	if (strnstr(buf, "BUG: KFENCE: ", len) && strnstr(buf, "test_", len)) {
		/*
		 * A KFENCE report related to the test.
		 *
		 * The provided @buf is not NUL-terminated; copy no more than
		 * @len bytes and let strscpy() add the missing NUL-terminator.
		 */
		strscpy(observed.lines[0], buf, min(len + 1, sizeof(observed.lines[0])));
		nlines = 1;
	} else if (nlines == 1 && (strnstr(buf, "at 0x", len) || strnstr(buf, "of 0x", len))) {
		strscpy(observed.lines[nlines++], buf, min(len + 1, sizeof(observed.lines[0])));
	}

	WRITE_ONCE(observed.nlines, nlines); /* Publish new nlines. */
	spin_unlock_irqrestore(&observed.lock, flags);
}

/* Check if a report related to the test exists. */
static bool report_available(void)
{
	return READ_ONCE(observed.nlines) == ARRAY_SIZE(observed.lines);
}

/* Information we expect in a report. */
struct expect_report {
	enum kfence_error_type type; /* The type of error. */
	void *fn; /* Function pointer to expected function where access occurred. */
	char *addr; /* Address at which the bad access occurred. */
	bool is_write; /* Is access a write. */
};

static const char *get_access_type(const struct expect_report *r)
{
	return r->is_write ? "write" : "read";
}

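/*
 * For reference, the two captured console lines look roughly like the
 * following (symbol offset and trailing object info are illustrative):
 *
 *   BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0x12/0x34
 *   Out-of-bounds read at 0x... (...)
 *
 * report_matches() below only compares the prefixes it generates via strstr(),
 * so the exact offset, module name, and trailing details do not matter.
 */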
/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
	bool ret = false;
	unsigned long flags;
	typeof(observed.lines) expect;
	const char *end;
	char *cur;

	/* Double-checked locking. */
	if (!report_available())
		return false;

	/* Generate expected report contents. */

	/* Title */
	cur = expect[0];
	end = &expect[0][sizeof(expect[0]) - 1];
	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
				 get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
		break;
	}

	scnprintf(cur, end - cur, " in %pS", r->fn);
	/* The exact offset won't match, remove it; also strip module name. */
	cur = strchr(expect[0], '+');
	if (cur)
		*cur = '\0';

	/* Access information */
	cur = expect[1];
	end = &expect[1][sizeof(expect[1]) - 1];

	switch (r->type) {
	case KFENCE_ERROR_OOB:
		cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_UAF:
		cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_CORRUPTION:
		cur += scnprintf(cur, end - cur, "Corrupted memory at");
		break;
	case KFENCE_ERROR_INVALID:
		cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
		break;
	case KFENCE_ERROR_INVALID_FREE:
		cur += scnprintf(cur, end - cur, "Invalid free of");
		break;
	}

	cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);

	spin_lock_irqsave(&observed.lock, flags);
	if (!report_available())
		goto out; /* A new report is being captured. */

	/* Finally match expected output to what we actually observed. */
	ret = strstr(observed.lines[0], expect[0]) && strstr(observed.lines[1], expect[1]);
out:
	spin_unlock_irqrestore(&observed.lock, flags);
	return ret;
}

/* ===== Test cases ===== */

#define TEST_PRIV_WANT_MEMCACHE ((void *)1)

/* Cache used by tests; if NULL, allocate from kmalloc instead. */
static struct kmem_cache *test_cache;

static size_t setup_test_cache(struct kunit *test, size_t size, slab_flags_t flags,
			       void (*ctor)(void *))
{
	if (test->priv != TEST_PRIV_WANT_MEMCACHE)
		return size;

	kunit_info(test, "%s: size=%zu, ctor=%ps\n", __func__, size, ctor);

	/*
	 * Use SLAB_NOLEAKTRACE to prevent merging with existing caches. Any
	 * other flag in SLAB_NEVER_MERGE also works. Use SLAB_ACCOUNT to
	 * allocate via memcg, if enabled.
	 */
	flags |= SLAB_NOLEAKTRACE | SLAB_ACCOUNT;
	test_cache = kmem_cache_create("test", size, 1, flags, ctor);
	KUNIT_ASSERT_TRUE_MSG(test, test_cache, "could not create cache");

	return size;
}

static void test_cache_destroy(void)
{
	if (!test_cache)
		return;

	kmem_cache_destroy(test_cache);
	test_cache = NULL;
}

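/*
 * Alignment of the kmalloc cache that would satisfy an allocation of @size
 * bytes; e.g. size = 73 maps via kmalloc_index() to the kmalloc-96 cache.
 */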
static inline size_t kmalloc_cache_alignment(size_t size)
{
	return kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)]->align;
}

/* Must always inline to match stack trace against caller. */
static __always_inline void test_free(void *ptr)
{
	if (test_cache)
		kmem_cache_free(test_cache, ptr);
	else
		kfree(ptr);
}

/*
 * Whether this should be a KFENCE allocation, and on which side of the
 * allocation the closest guard page should be.
 */
enum allocation_policy {
	ALLOCATE_ANY, /* KFENCE, any side. */
	ALLOCATE_LEFT, /* KFENCE, left side of page. */
	ALLOCATE_RIGHT, /* KFENCE, right side of page. */
	ALLOCATE_NONE, /* No KFENCE allocation. */
};
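/*
 * Note: an object on the left side of its page starts page-aligned, with the
 * guard page immediately to its left; an object on the right side is placed
 * towards the end of the page, with the guard page to its right. test_alloc()
 * below distinguishes the two cases via IS_ALIGNED(addr, PAGE_SIZE).
 */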

/*
 * Try to get a guarded allocation from KFENCE. Uses either kmalloc() or the
 * current test_cache if set up.
 */
static void *test_alloc(struct kunit *test, size_t size, gfp_t gfp, enum allocation_policy policy)
{
	void *alloc;
	unsigned long timeout, resched_after;
	const char *policy_name;

	switch (policy) {
	case ALLOCATE_ANY:
		policy_name = "any";
		break;
	case ALLOCATE_LEFT:
		policy_name = "left";
		break;
	case ALLOCATE_RIGHT:
		policy_name = "right";
		break;
	case ALLOCATE_NONE:
		policy_name = "none";
		break;
	}

	kunit_info(test, "%s: size=%zu, gfp=%x, policy=%s, cache=%i\n", __func__, size, gfp,
		   policy_name, !!test_cache);

	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	/*
	 * Especially for non-preemptible kernels, ensure the allocation-gate
	 * timer can catch up: after @resched_after, every failed allocation
	 * attempt yields, to ensure the allocation-gate timer is scheduled.
	 */
	resched_after = jiffies + msecs_to_jiffies(CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		if (test_cache)
			alloc = kmem_cache_alloc(test_cache, gfp);
		else
			alloc = kmalloc(size, gfp);

		if (is_kfence_address(alloc)) {
			struct page *page = virt_to_head_page(alloc);
			struct kmem_cache *s = test_cache ?: kmalloc_caches[kmalloc_type(GFP_KERNEL)][kmalloc_index(size)];

			/*
			 * Verify that various helpers return the right values
			 * even for KFENCE objects; these are required so that
			 * memcg accounting works correctly.
			 */
			KUNIT_EXPECT_EQ(test, obj_to_index(s, page, alloc), 0U);
			KUNIT_EXPECT_EQ(test, objs_per_slab_page(s, page), 1);

			if (policy == ALLOCATE_ANY)
				return alloc;
			if (policy == ALLOCATE_LEFT && IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
			if (policy == ALLOCATE_RIGHT &&
			    !IS_ALIGNED((unsigned long)alloc, PAGE_SIZE))
				return alloc;
		} else if (policy == ALLOCATE_NONE)
			return alloc;

		test_free(alloc);

		if (time_after(jiffies, resched_after))
			cond_resched();
	} while (time_before(jiffies, timeout));

	KUNIT_ASSERT_TRUE_MSG(test, false, "failed to allocate from KFENCE");
	return NULL; /* Unreachable. */
}

static void test_out_of_bounds_read(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_read,
		.is_write = false,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

	/*
	 * If we don't have our own cache, adjust based on alignment, so that we
	 * actually access guard pages on either side.
	 */
	if (!test_cache)
		size = kmalloc_cache_alignment(size);

	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf + size;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_out_of_bounds_write(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_out_of_bounds_write,
		.is_write = true,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	test_free(buf);
}

static void test_use_after_free_read(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_use_after_free_read,
		.is_write = false,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_double_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_double_free,
	};

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	test_free(expect.addr);
	test_free(expect.addr); /* Double-free. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_invalid_addr_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID_FREE,
		.fn = test_invalid_addr_free,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	expect.addr = buf + 1; /* Free on invalid address. */
	test_free(expect.addr); /* Invalid address free. */
	test_free(buf); /* No error. */
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

static void test_corruption(struct kunit *test)
{
	size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_corruption,
	};
	char *buf;

	setup_test_cache(test, size, 0, NULL);

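	/*
	 * Each write below lands in the unused area between the object and its
	 * guard page, not on the guard page itself; KFENCE validates these
	 * canary bytes only on free, hence report_matches() is checked after
	 * test_free().
	 */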
	/* Test both sides. */

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	expect.addr = buf - 1;
	WRITE_ONCE(*expect.addr, 42);
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/*
 * KFENCE is unable to detect an OOB if the allocation's alignment requirements
 * leave a gap between the object and the guard page. Specifically, an
 * allocation of e.g. 73 bytes is aligned on 8 and 128 bytes for SLUB or SLAB
 * respectively. Therefore it is impossible for the allocated object to
 * contiguously line up with the right guard page.
 *
 * However, we test that an access to memory beyond the gap results in KFENCE
 * detecting an OOB access.
 */
static void test_kmalloc_aligned_oob_read(struct kunit *test)
{
	const size_t size = 73;
	const size_t align = kmalloc_cache_alignment(size);
	struct expect_report expect = {
		.type = KFENCE_ERROR_OOB,
		.fn = test_kmalloc_aligned_oob_read,
		.is_write = false,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);

	/*
	 * The object is offset to the right, so there won't be an OOB to the
	 * left of it.
	 */
	READ_ONCE(*(buf - 1));
	KUNIT_EXPECT_FALSE(test, report_available());

	/*
	 * @buf must be aligned on @align, therefore buf + size belongs to the
	 * same page -> no OOB.
	 */
	READ_ONCE(*(buf + size));
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Overflowing by @align bytes will result in an OOB. */
	expect.addr = buf + size + align;
	READ_ONCE(*expect.addr);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));

	test_free(buf);
}

static void test_kmalloc_aligned_oob_write(struct kunit *test)
{
	const size_t size = 73;
	struct expect_report expect = {
		.type = KFENCE_ERROR_CORRUPTION,
		.fn = test_kmalloc_aligned_oob_write,
	};
	char *buf;

	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT);
	/*
	 * The object is offset to the right, so we won't get a page
	 * fault immediately after it.
	 */
	expect.addr = buf + size;
	WRITE_ONCE(*expect.addr, READ_ONCE(*expect.addr) + 1);
	KUNIT_EXPECT_FALSE(test, report_available());
	test_free(buf);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test cache shrinking and destroying with KFENCE. */
static void test_shrink_memcache(struct kunit *test)
{
	const size_t size = 32;
	void *buf;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	kmem_cache_shrink(test_cache);
	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void ctor_set_x(void *obj)
{
	/* Every object has at least 8 bytes. */
	memset(obj, 'x', 8);
}

/* Ensure that SL*B does not modify KFENCE objects on bulk free. */
static void test_free_bulk(struct kunit *test)
{
	int iter;

	for (iter = 0; iter < 5; iter++) {
		const size_t size = setup_test_cache(test, 8 + prandom_u32_max(300), 0,
						     (iter & 1) ? ctor_set_x : NULL);
		void *objects[] = {
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_RIGHT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
			test_alloc(test, size, GFP_KERNEL, ALLOCATE_NONE),
		};

		kmem_cache_free_bulk(test_cache, ARRAY_SIZE(objects), objects);
		KUNIT_ASSERT_FALSE(test, report_available());
		test_cache_destroy();
	}
}

/* Test that init-on-free works. */
static void test_init_on_free(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_init_on_free,
		.is_write = false,
	};
	int i;

	if (!IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON))
		return;
	/* Assume it hasn't been disabled on the command line. */

	setup_test_cache(test, size, 0, NULL);
	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		expect.addr[i] = i + 1;
	test_free(expect.addr);

	for (i = 0; i < size; i++) {
		/*
		 * This may fail if the page was recycled by KFENCE and then
		 * written to again -- this, however, is near impossible with a
		 * default config.
		 */
		KUNIT_EXPECT_EQ(test, expect.addr[i], (char)0);

		if (!i) /* Only check first access to not fail test if page is ever re-protected. */
			KUNIT_EXPECT_TRUE(test, report_matches(&expect));
	}
}

/* Ensure that constructors work properly. */
static void test_memcache_ctor(struct kunit *test)
{
	const size_t size = 32;
	char *buf;
	int i;

	setup_test_cache(test, size, 0, ctor_set_x);
	buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);

	for (i = 0; i < 8; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)'x');

	test_free(buf);

	KUNIT_EXPECT_FALSE(test, report_available());
}

/* Test that memory is zeroed if requested. */
static void test_gfpzero(struct kunit *test)
{
	const size_t size = PAGE_SIZE; /* PAGE_SIZE so we can use ALLOCATE_ANY. */
	char *buf1, *buf2;
	int i;

	if (CONFIG_KFENCE_SAMPLE_INTERVAL > 100) {
		kunit_warn(test, "skipping ... would take too long\n");
		return;
	}

	setup_test_cache(test, size, 0, NULL);
	buf1 = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	for (i = 0; i < size; i++)
		buf1[i] = i + 1;
	test_free(buf1);

	/* Try to get the same address again -- this can take a while. */
	for (i = 0;; i++) {
		buf2 = test_alloc(test, size, GFP_KERNEL | __GFP_ZERO, ALLOCATE_ANY);
		if (buf1 == buf2)
			break;
		test_free(buf2);

		if (i == CONFIG_KFENCE_NUM_OBJECTS) {
			kunit_warn(test, "giving up ... cannot get same object back\n");
			return;
		}
	}

	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf2[i], (char)0);

	test_free(buf2);

	KUNIT_EXPECT_FALSE(test, report_available());
}

static void test_invalid_access(struct kunit *test)
{
	const struct expect_report expect = {
		.type = KFENCE_ERROR_INVALID,
		.fn = test_invalid_access,
		.addr = &__kfence_pool[10],
		.is_write = false,
	};

	READ_ONCE(__kfence_pool[10]);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test that SLAB_TYPESAFE_BY_RCU works. */
static void test_memcache_typesafe_by_rcu(struct kunit *test)
{
	const size_t size = 32;
	struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_memcache_typesafe_by_rcu,
		.is_write = false,
	};

	setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */

	expect.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY);
	*expect.addr = 42;

	rcu_read_lock();
	test_free(expect.addr);
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	/*
	 * Up to this point, memory should not have been freed yet, and
	 * therefore there should be no KFENCE report from the above access.
	 */
	rcu_read_unlock();

	/* Above access to @expect.addr should not have generated a report! */
	KUNIT_EXPECT_FALSE(test, report_available());

	/* Only after rcu_barrier() is the memory guaranteed to be freed. */
	rcu_barrier();

	/* Expect use-after-free. */
	KUNIT_EXPECT_EQ(test, *expect.addr, (char)42);
	KUNIT_EXPECT_TRUE(test, report_matches(&expect));
}

/* Test krealloc(). */
static void test_krealloc(struct kunit *test)
{
	const size_t size = 32;
	const struct expect_report expect = {
		.type = KFENCE_ERROR_UAF,
		.fn = test_krealloc,
		.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
		.is_write = false,
	};
	char *buf = expect.addr;
	int i;

	KUNIT_EXPECT_FALSE(test, test_cache);
	KUNIT_EXPECT_EQ(test, ksize(buf), size); /* Precise size match after KFENCE alloc. */
	for (i = 0; i < size; i++)
		buf[i] = i + 1;

	/* Check that we successfully change the size. */
	buf = krealloc(buf, size * 3, GFP_KERNEL); /* Grow. */
	/* Note: Might no longer be a KFENCE alloc. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 3);
	for (i = 0; i < size; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));
	for (; i < size * 3; i++) /* Fill the extra bytes. */
		buf[i] = i + 1;

	buf = krealloc(buf, size * 2, GFP_KERNEL); /* Shrink. */
	KUNIT_EXPECT_GE(test, ksize(buf), size * 2);
	for (i = 0; i < size * 2; i++)
		KUNIT_EXPECT_EQ(test, buf[i], (char)(i + 1));

	buf = krealloc(buf, 0, GFP_KERNEL); /* Free. */
	KUNIT_EXPECT_EQ(test, (unsigned long)buf, (unsigned long)ZERO_SIZE_PTR);
	KUNIT_ASSERT_FALSE(test, report_available()); /* No reports yet! */

	READ_ONCE(*expect.addr); /* Ensure krealloc() actually freed earlier KFENCE object. */
	KUNIT_ASSERT_TRUE(test, report_matches(&expect));
}

/* Test that some objects from a bulk allocation belong to the KFENCE pool. */
static void test_memcache_alloc_bulk(struct kunit *test)
{
	const size_t size = 32;
	bool pass = false;
	unsigned long timeout;

	setup_test_cache(test, size, 0, NULL);
	KUNIT_EXPECT_TRUE(test, test_cache); /* Want memcache. */
	/*
	 * 100x the sample interval should be more than enough to ensure we get
	 * a KFENCE allocation eventually.
	 */
	timeout = jiffies + msecs_to_jiffies(100 * CONFIG_KFENCE_SAMPLE_INTERVAL);
	do {
		void *objects[100];
		int i, num = kmem_cache_alloc_bulk(test_cache, GFP_ATOMIC, ARRAY_SIZE(objects),
						   objects);
		if (!num)
			continue;
		for (i = 0; i < ARRAY_SIZE(objects); i++) {
			if (is_kfence_address(objects[i])) {
				pass = true;
				break;
			}
		}
		kmem_cache_free_bulk(test_cache, num, objects);
		/*
		 * kmem_cache_alloc_bulk() disables interrupts, and calling it
		 * in a tight loop may not give KFENCE a chance to switch the
		 * static branch. Call cond_resched() to let KFENCE chime in.
		 */
		cond_resched();
	} while (!pass && time_before(jiffies, timeout));

	KUNIT_EXPECT_TRUE(test, pass);
	KUNIT_EXPECT_FALSE(test, report_available());
}

/*
 * KUnit does not provide a way to pass arguments to tests, so we encode
 * additional info in the name. Set up 2 tests per test case, one using the
 * default allocator, and another using a custom memcache (suffix '-memcache').
 */
#define KFENCE_KUNIT_CASE(test_name)						\
	{ .run_case = test_name, .name = #test_name },				\
	{ .run_case = test_name, .name = #test_name "-memcache" }
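/*
 * E.g. KFENCE_KUNIT_CASE(test_foo) expands to two entries named "test_foo"
 * and "test_foo-memcache"; test_init() keys off the "memcache" substring to
 * set TEST_PRIV_WANT_MEMCACHE.
 */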

static struct kunit_case kfence_test_cases[] = {
	KFENCE_KUNIT_CASE(test_out_of_bounds_read),
	KFENCE_KUNIT_CASE(test_out_of_bounds_write),
	KFENCE_KUNIT_CASE(test_use_after_free_read),
	KFENCE_KUNIT_CASE(test_double_free),
	KFENCE_KUNIT_CASE(test_invalid_addr_free),
	KFENCE_KUNIT_CASE(test_corruption),
	KFENCE_KUNIT_CASE(test_free_bulk),
	KFENCE_KUNIT_CASE(test_init_on_free),
	KUNIT_CASE(test_kmalloc_aligned_oob_read),
	KUNIT_CASE(test_kmalloc_aligned_oob_write),
	KUNIT_CASE(test_shrink_memcache),
	KUNIT_CASE(test_memcache_ctor),
	KUNIT_CASE(test_invalid_access),
	KUNIT_CASE(test_gfpzero),
	KUNIT_CASE(test_memcache_typesafe_by_rcu),
	KUNIT_CASE(test_krealloc),
	KUNIT_CASE(test_memcache_alloc_bulk),
	{},
};

/* ===== End test cases ===== */

static int test_init(struct kunit *test)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&observed.lock, flags);
	for (i = 0; i < ARRAY_SIZE(observed.lines); i++)
		observed.lines[i][0] = '\0';
	observed.nlines = 0;
	spin_unlock_irqrestore(&observed.lock, flags);

	/* Any test with 'memcache' in its name will want a memcache. */
	if (strstr(test->name, "memcache"))
		test->priv = TEST_PRIV_WANT_MEMCACHE;
	else
		test->priv = NULL;

	return 0;
}

static void test_exit(struct kunit *test)
{
	test_cache_destroy();
}

static struct kunit_suite kfence_test_suite = {
	.name = "kfence",
	.test_cases = kfence_test_cases,
	.init = test_init,
	.exit = test_exit,
};
static struct kunit_suite *kfence_test_suites[] = { &kfence_test_suite, NULL };

static void register_tracepoints(struct tracepoint *tp, void *ignore)
{
	check_trace_callback_type_console(probe_console);
	if (!strcmp(tp->name, "console"))
		WARN_ON(tracepoint_probe_register(tp, probe_console, NULL));
}

static void unregister_tracepoints(struct tracepoint *tp, void *ignore)
{
	if (!strcmp(tp->name, "console"))
		tracepoint_probe_unregister(tp, probe_console, NULL);
}

/*
 * We only want to do tracepoint setup and teardown once, therefore we have to
 * customize the init and exit functions and cannot rely on kunit_test_suite().
 */
static int __init kfence_test_init(void)
{
	/*
	 * Because we want to be able to build the test as a module, we need to
	 * iterate through all known tracepoints, since the static registration
	 * won't work here.
	 */
	for_each_kernel_tracepoint(register_tracepoints, NULL);
	return __kunit_test_suites_init(kfence_test_suites);
}

static void kfence_test_exit(void)
{
	__kunit_test_suites_exit(kfence_test_suites);
	for_each_kernel_tracepoint(unregister_tracepoints, NULL);
	tracepoint_synchronize_unregister();
}

late_initcall(kfence_test_init);
module_exit(kfence_test_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Alexander Potapenko <glider@google.com>, Marco Elver <elver@google.com>");