// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <a.ryabinin@samsung.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include <asm/page.h>

#include <kunit/test.h>

#include "../mm/kasan/kasan.h"

#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)

/*
 * Some tests use these global variables to store return values from function
 * calls that could otherwise be eliminated by the compiler as dead code.
 */
void *kasan_ptr_result;
int kasan_int_result;

static struct kunit_resource resource;
static struct kunit_kasan_expectation fail_data;
static bool multishot;

/*
 * Temporarily enable multi-shot mode. Otherwise, KASAN would only report the
 * first detected bug and panic the kernel if panic_on_warn is enabled. For
 * hardware tag-based KASAN, also allow tag checking to be re-enabled for each
 * test; see the comment for KUNIT_EXPECT_KASAN_FAIL().
 */
static int kasan_test_init(struct kunit *test)
{
	if (!kasan_enabled()) {
		kunit_err(test, "can't run KASAN tests with KASAN disabled");
		return -1;
	}

	multishot = kasan_save_enable_multi_shot();
	kasan_set_tagging_report_once(false);
	return 0;
}

static void kasan_test_exit(struct kunit *test)
{
	kasan_set_tagging_report_once(true);
	kasan_restore_multi_shot(multishot);
}

/**
 * KUNIT_EXPECT_KASAN_FAIL() - check that the executed expression produces a
 * KASAN report; causes a test failure otherwise. This relies on a KUnit
 * resource named "kasan_data". Do not use this name for KUnit resources
 * outside of KASAN tests.
 *
 * For hardware tag-based KASAN in sync mode, when a tag fault happens, tag
 * checking is auto-disabled. When this happens, this test handler re-enables
 * tag checking. As tag checking can only be disabled or enabled per CPU,
 * this handler disables migration (preemption).
 *
 * Since the compiler doesn't see that the expression can change the fail_data
 * fields, it can reorder or optimize away the accesses to those fields.
 * Use READ/WRITE_ONCE() for the accesses and compiler barriers around the
 * expression to prevent that.
 */
#define KUNIT_EXPECT_KASAN_FAIL(test, expression) do {			\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled())				\
		migrate_disable();					\
	WRITE_ONCE(fail_data.report_expected, true);			\
	WRITE_ONCE(fail_data.report_found, false);			\
	kunit_add_named_resource(test,					\
				NULL,					\
				NULL,					\
				&resource,				\
				"kasan_data", &fail_data);		\
	barrier();							\
	expression;							\
	barrier();							\
	if (kasan_async_mode_enabled())					\
		kasan_force_async_fault();				\
	barrier();							\
	KUNIT_EXPECT_EQ(test,						\
			READ_ONCE(fail_data.report_expected),		\
			READ_ONCE(fail_data.report_found));		\
	if (IS_ENABLED(CONFIG_KASAN_HW_TAGS) &&				\
	    !kasan_async_mode_enabled()) {				\
		if (READ_ONCE(fail_data.report_found))			\
			kasan_enable_tagging_sync();			\
		migrate_enable();					\
	}								\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do {			\
	if (!IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " required");	\
		return;							\
	}								\
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do {			\
	if (IS_ENABLED(config)) {					\
		kunit_info((test), "skipping, " #config " enabled");	\
		return;							\
	}								\
} while (0)

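/* Check that an out-of-bounds write just past a kmalloc object is detected. */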
static void kmalloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 123;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 'x');
	kfree(ptr);
}

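/* Check that an out-of-bounds read one byte before a kmalloc object is detected. */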
static void kmalloc_oob_left(struct kunit *test)
{
	char *ptr;
	size_t size = 15;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr = *(ptr - 1));
	kfree(ptr);
}

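/* Check that an out-of-bounds write just past a kmalloc_node() object is detected. */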
static void kmalloc_node_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = 4096;

	ptr = kmalloc_node(size, GFP_KERNEL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

/*
 * These kmalloc_pagealloc_* tests try allocating a memory chunk that doesn't
 * fit into a slab cache and therefore is allocated via the page allocator
 * fallback. Since this kind of fallback is only implemented for SLUB, these
 * tests are limited to that allocator.
 */
static void kmalloc_pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size + OOB_TAG_OFF] = 0);

	kfree(ptr);
}

static void kmalloc_pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_pagealloc_invalid_free(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE + 10;

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, kfree(ptr + 1));
}

static void pagealloc_oob_right(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;
	size_t size = (1UL << (PAGE_SHIFT + order));

	/*
	 * With generic KASAN, page allocations have no redzones, so
	 * out-of-bounds detection is not guaranteed.
	 * See https://bugzilla.kernel.org/show_bug.cgi?id=210503.
	 */
	KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	free_pages((unsigned long)ptr, order);
}

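/* Check that a use-after-free of a page allocator (alloc_pages) allocation is detected. */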
static void pagealloc_uaf(struct kunit *test)
{
	char *ptr;
	struct page *pages;
	size_t order = 4;

	pages = alloc_pages(GFP_KERNEL, order);
	ptr = page_address(pages);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	free_pages((unsigned long)ptr, order);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[0] = 0);
}

static void kmalloc_large_oob_right(struct kunit *test)
{
	char *ptr;
	size_t size = KMALLOC_MAX_CACHE_SIZE - 256;

	/*
	 * Allocate a chunk that is large enough, but still fits into a slab
	 * and does not trigger the page allocator fallback in SLUB.
	 */
	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr[size] = 0);
	kfree(ptr);
}

static void krealloc_more_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size1, size2);
	middle = size1 + (size2 - size1) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* All offsets up to size2 must be accessible. */
	ptr2[size1 - 1] = 'x';
	ptr2[size1] = 'x';
	ptr2[middle] = 'x';
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	kfree(ptr2);
}

static void krealloc_less_oob_helper(struct kunit *test,
					size_t size1, size_t size2)
{
	char *ptr1, *ptr2;
	size_t middle;

	KUNIT_ASSERT_LT(test, size2, size1);
	middle = size2 + (size1 - size2) / 2;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = krealloc(ptr1, size2, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/* Must be accessible for all modes. */
	ptr2[size2 - 1] = 'x';

	/* Generic mode is precise, so unaligned size2 must be inaccessible. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size2] = 'x');

	/* For all modes, the first aligned offset after size2 must be inaccessible. */
	KUNIT_EXPECT_KASAN_FAIL(test,
		ptr2[round_up(size2, KASAN_GRANULE_SIZE)] = 'x');

	/*
	 * For all modes, size2, middle, and size1 should land in separate
	 * granules, and thus the latter two offsets should be inaccessible.
	 */
	KUNIT_EXPECT_LE(test, round_up(size2, KASAN_GRANULE_SIZE),
			round_down(middle, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_LE(test, round_up(middle, KASAN_GRANULE_SIZE),
			round_down(size1, KASAN_GRANULE_SIZE));
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[middle] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1 - 1] = 'x');
	KUNIT_EXPECT_KASAN_FAIL(test, ptr2[size1] = 'x');

	kfree(ptr2);
}

static void krealloc_more_oob(struct kunit *test)
{
	krealloc_more_oob_helper(test, 201, 235);
}

static void krealloc_less_oob(struct kunit *test)
{
	krealloc_less_oob_helper(test, 235, 201);
}

static void krealloc_pagealloc_more_oob(struct kunit *test)
{
	/* The page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_more_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 201,
					KMALLOC_MAX_CACHE_SIZE + 235);
}

static void krealloc_pagealloc_less_oob(struct kunit *test)
{
	/* The page_alloc fallback is only implemented for SLUB. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_SLUB);

	krealloc_less_oob_helper(test, KMALLOC_MAX_CACHE_SIZE + 235,
					KMALLOC_MAX_CACHE_SIZE + 201);
}

/*
 * Check that krealloc() detects a use-after-free, returns NULL,
 * and doesn't unpoison the freed object.
 */
static void krealloc_uaf(struct kunit *test)
{
	char *ptr1, *ptr2;
	int size1 = 201;
	int size2 = 235;

	ptr1 = kmalloc(size1, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);
	kfree(ptr1);

	KUNIT_EXPECT_KASAN_FAIL(test, ptr2 = krealloc(ptr1, size2, GFP_KERNEL));
	KUNIT_ASSERT_PTR_EQ(test, (void *)ptr2, NULL);
	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)ptr1);
}

static void kmalloc_oob_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	/* This test is specifically crafted for the generic mode. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	ptr1 = kmalloc(sizeof(*ptr1) - 3, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
	kfree(ptr2);
}

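/* Check that a 16-byte read from a freed object is detected. */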
static void kmalloc_uaf_16(struct kunit *test)
{
	struct {
		u64 words[2];
	} *ptr1, *ptr2;

	ptr1 = kmalloc(sizeof(*ptr1), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	ptr2 = kmalloc(sizeof(*ptr2), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);
	kfree(ptr2);

	KUNIT_EXPECT_KASAN_FAIL(test, *ptr1 = *ptr2);
	kfree(ptr1);
}

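/*
 * The kmalloc_oob_memset_* tests check that memset() calls that extend past
 * the end of the allocated object are detected.
 */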
static void kmalloc_oob_memset_2(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 7 + OOB_TAG_OFF, 0, 2));
	kfree(ptr);
}

static void kmalloc_oob_memset_4(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 5 + OOB_TAG_OFF, 0, 4));
	kfree(ptr);
}

static void kmalloc_oob_memset_8(struct kunit *test)
{
	char *ptr;
	size_t size = 8;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 8));
	kfree(ptr);
}

static void kmalloc_oob_memset_16(struct kunit *test)
{
	char *ptr;
	size_t size = 16;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr + 1 + OOB_TAG_OFF, 0, 16));
	kfree(ptr);
}

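/* Check that a memset() spanning the whole object plus a few bytes past its end is detected. */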
static void kmalloc_oob_in_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 666;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size + 5 + OOB_TAG_OFF));
	kfree(ptr);
}

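/* Check that memmove() with a negative size (which wraps to a huge size_t value) is reported. */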
static void kmalloc_memmove_invalid_size(struct kunit *test)
{
	char *ptr;
	size_t size = 64;
	volatile size_t invalid_size = -2;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	memset((char *)ptr, 0, 64);

	KUNIT_EXPECT_KASAN_FAIL(test,
		memmove((char *)ptr, (char *)ptr + 4, invalid_size));
	kfree(ptr);
}

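/* Check that a write to a freed kmalloc object is detected. */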
static void kmalloc_uaf(struct kunit *test)
{
	char *ptr;
	size_t size = 10;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, *(ptr + 8) = 'x');
}

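/* Check that a memset() over a freed object is detected. */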
static void kmalloc_uaf_memset(struct kunit *test)
{
	char *ptr;
	size_t size = 33;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	kfree(ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, memset(ptr, 0, size));
}

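/*
 * Check that an access to a freed object is still detected after its slot may
 * have been reused by a subsequent allocation.
 */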
static void kmalloc_uaf2(struct kunit *test)
{
	char *ptr1, *ptr2;
	size_t size = 43;
	int counter = 0;

again:
	ptr1 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr1);

	kfree(ptr1);

	ptr2 = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr2);

	/*
	 * For tag-based KASAN, ptr1 and ptr2 tags might happen to be the same.
	 * Allow up to 16 attempts at generating different tags.
	 */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC) && ptr1 == ptr2 && counter++ < 16) {
		kfree(ptr2);
		goto again;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, ptr1[40] = 'x');
	KUNIT_EXPECT_PTR_NE(test, ptr1, ptr2);

	kfree(ptr2);
}

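/*
 * Check that kfree() works on a pointer recreated via virt_to_page() and
 * page_address(); this should not produce a false-positive report.
 */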
static void kfree_via_page(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	struct page *page;
	unsigned long offset;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	page = virt_to_page(ptr);
	offset = offset_in_page(ptr);
	kfree(page_address(page) + offset);
}

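/*
 * Check that kfree() works on a pointer round-tripped through virt_to_phys()
 * and phys_to_virt(); this should not produce a false-positive report.
 */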
static void kfree_via_phys(struct kunit *test)
{
	char *ptr;
	size_t size = 8;
	phys_addr_t phys;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);

	phys = virt_to_phys(ptr);
	kfree(phys_to_virt(phys));
}

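/* Check that an out-of-bounds read just past a kmem_cache object is detected. */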
static void kmem_cache_oob(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	KUNIT_EXPECT_KASAN_FAIL(test, *p = p[size + OOB_TAG_OFF]);

	kmem_cache_free(cache, p);
	kmem_cache_destroy(cache);
}

static void kmem_cache_accounted(struct kunit *test)
{
	int i;
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_ACCOUNT, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	/*
	 * Several allocations with a delay to allow for lazy per-memcg kmem
	 * cache creation.
	 */
	for (i = 0; i < 5; i++) {
		p = kmem_cache_alloc(cache, GFP_KERNEL);
		if (!p)
			goto free_cache;

		kmem_cache_free(cache, p);
		msleep(100);
	}

free_cache:
	kmem_cache_destroy(cache);
}

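/*
 * Check that objects from kmem_cache_alloc_bulk() are properly unpoisoned:
 * in-bounds writes and the bulk free should not produce reports.
 */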
static void kmem_cache_bulk(struct kunit *test)
{
	struct kmem_cache *cache;
	size_t size = 200;
	char *p[10];
	bool ret;
	int i;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	ret = kmem_cache_alloc_bulk(cache, GFP_KERNEL, ARRAY_SIZE(p), (void **)&p);
	if (!ret) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	for (i = 0; i < ARRAY_SIZE(p); i++)
		p[i][0] = p[i][size - 1] = 42;

	kmem_cache_free_bulk(cache, ARRAY_SIZE(p), (void **)&p);
	kmem_cache_destroy(cache);
}

static char global_array[10];

static void kasan_global_oob(struct kunit *test)
{
	/*
	 * Deliberate out-of-bounds access. To prevent CONFIG_UBSAN_LOCAL_BOUNDS
	 * from failing here and panicking the kernel, access the array via a
	 * volatile pointer, which will prevent the compiler from being able to
	 * determine the array bounds.
	 *
	 * This access uses a volatile pointer to char (char *volatile) rather
	 * than the more conventional pointer to volatile char (volatile char *)
	 * because we want to prevent the compiler from making inferences about
	 * the pointer itself (i.e. its array bounds), not the data that it
	 * refers to.
	 */
	char *volatile array = global_array;
	char *p = &array[ARRAY_SIZE(global_array) + 3];

	/* Only generic mode instruments globals. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

/* Check that ksize() makes the whole object accessible. */
static void ksize_unpoisons_memory(struct kunit *test)
{
	char *ptr;
	size_t size = 123, real_size;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	real_size = ksize(ptr);

	/* This access shouldn't trigger a KASAN report. */
	ptr[size] = 'x';

	/* This one must. */
	KUNIT_EXPECT_KASAN_FAIL(test, ptr[real_size] = 'y');

	kfree(ptr);
}

/*
 * Check that a use-after-free is detected by ksize() and via normal accesses
 * after it.
 */
static void ksize_uaf(struct kunit *test)
{
	char *ptr;
	int size = 128 - KASAN_GRANULE_SIZE;

	ptr = kmalloc(size, GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
	kfree(ptr);

	KUNIT_EXPECT_KASAN_FAIL(test, ksize(ptr));
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *ptr);
	KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = *(ptr + size));
}

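/* Check that an out-of-bounds access past a stack array is detected (requires CONFIG_KASAN_STACK). */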
static void kasan_stack_oob(struct kunit *test)
{
	char stack_array[10];
	/* See comment in kasan_global_oob. */
	char *volatile array = stack_array;
	char *p = &array[ARRAY_SIZE(stack_array) + OOB_TAG_OFF];

	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_left(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array - 1;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

static void kasan_alloca_oob_right(struct kunit *test)
{
	volatile int i = 10;
	char alloca_array[i];
	/* See comment in kasan_global_oob. */
	char *volatile array = alloca_array;
	char *p = array + i;

	/* Only generic mode instruments dynamic allocas. */
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
	KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_STACK);

	KUNIT_EXPECT_KASAN_FAIL(test, *(volatile char *)p);
}

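/* Check that a double-free of a kmem_cache object is detected. */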
static void kmem_cache_double_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, 0, NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	kmem_cache_free(cache, p);
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p));
	kmem_cache_destroy(cache);
}

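/*
 * Check that freeing a pointer that doesn't point to the start of a
 * kmem_cache object is reported as an invalid free.
 */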
static void kmem_cache_invalid_free(struct kunit *test)
{
	char *p;
	size_t size = 200;
	struct kmem_cache *cache;

	cache = kmem_cache_create("test_cache", size, 0, SLAB_TYPESAFE_BY_RCU,
				  NULL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cache);

	p = kmem_cache_alloc(cache, GFP_KERNEL);
	if (!p) {
		kunit_err(test, "Allocation failed: %s\n", __func__);
		kmem_cache_destroy(cache);
		return;
	}

	/* Trigger an invalid free; the object doesn't get freed. */
	KUNIT_EXPECT_KASAN_FAIL(test, kmem_cache_free(cache, p + 1));

	/*
	 * Properly free the object to prevent the "Objects remaining in
	 * test_cache on __kmem_cache_shutdown" BUG failure.
	 */
	kmem_cache_free(cache, p);

	kmem_cache_destroy(cache);
}

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static void kasan_memchr(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) size_t size = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (OOB_TAG_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) size = round_up(size, OOB_TAG_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) KUNIT_EXPECT_KASAN_FAIL(test,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) kasan_ptr_result = memchr(ptr, '1', size + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) kfree(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826)
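/* Check that KASAN detects an out-of-bounds read made through memcmp(). */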
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) static void kasan_memcmp(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) size_t size = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) int arr[9];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) if (OOB_TAG_OFF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) size = round_up(size, OOB_TAG_OFF);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) memset(arr, 0, sizeof(arr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
	KUNIT_EXPECT_KASAN_FAIL(test,
			kasan_int_result = memcmp(ptr, arr, size + 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) kfree(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
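/* Check that KASAN detects use-after-free accesses by str* functions. */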
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) static void kasan_strings(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) size_t size = 24;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) * str* functions are not instrumented with CONFIG_AMD_MEM_ENCRYPT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) * See https://bugzilla.kernel.org/show_bug.cgi?id=206337 for details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_AMD_MEM_ENCRYPT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) ptr = kmalloc(size, GFP_KERNEL | __GFP_ZERO);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) kfree(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866)
	/*
	 * Try to cause only one invalid access (less spam in dmesg).
	 * For that, ptr must point to a zeroed byte. Skip past the
	 * metadata that may be stored at the start of a freed object,
	 * so that ptr likely points to a zeroed byte.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) ptr += 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strchr(ptr, '1'));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) KUNIT_EXPECT_KASAN_FAIL(test, kasan_ptr_result = strrchr(ptr, '1'));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strcmp(ptr, "2"));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strncmp(ptr, "2", 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strlen(ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = strnlen(ptr, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886)
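/*
 * Helper for the bitops tests: the callers pass an out-of-bounds bit/address
 * combination, so every bitop below is expected to produce a KASAN report.
 */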
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) static void kasan_bitops_modify(struct kunit *test, int nr, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) KUNIT_EXPECT_KASAN_FAIL(test, set_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) KUNIT_EXPECT_KASAN_FAIL(test, __set_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) KUNIT_EXPECT_KASAN_FAIL(test, clear_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) KUNIT_EXPECT_KASAN_FAIL(test, clear_bit_unlock(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) KUNIT_EXPECT_KASAN_FAIL(test, __clear_bit_unlock(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) KUNIT_EXPECT_KASAN_FAIL(test, change_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) KUNIT_EXPECT_KASAN_FAIL(test, __change_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898)
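/*
 * Same as kasan_bitops_modify(), but for the test-and-modify and test
 * variants of the bitops.
 */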
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static void kasan_bitops_test_and_modify(struct kunit *test, int nr, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) KUNIT_EXPECT_KASAN_FAIL(test, __test_and_set_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) KUNIT_EXPECT_KASAN_FAIL(test, test_and_set_bit_lock(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) KUNIT_EXPECT_KASAN_FAIL(test, test_and_clear_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) KUNIT_EXPECT_KASAN_FAIL(test, __test_and_clear_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) KUNIT_EXPECT_KASAN_FAIL(test, test_and_change_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) KUNIT_EXPECT_KASAN_FAIL(test, __test_and_change_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result = test_bit(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) #if defined(clear_bit_unlock_is_negative_byte)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) KUNIT_EXPECT_KASAN_FAIL(test, kasan_int_result =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) clear_bit_unlock_is_negative_byte(nr, addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) static void kasan_bitops_generic(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) long *bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) /* This test is specifically crafted for the generic mode. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_GENERIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * Allocate 1 more byte, which causes kzalloc to round up to 16 bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * this way we do not actually corrupt other memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) bits = kzalloc(sizeof(*bits) + 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
	/*
	 * The calls below try to access a bit within the allocated memory;
	 * however, the accesses are still out-of-bounds, since bitops are
	 * defined to operate on the whole long the bit is in.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) kasan_bitops_modify(test, BITS_PER_LONG, bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936)
	/* The calls below try to access a bit beyond the allocated memory. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) kfree(bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) static void kasan_bitops_tags(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) long *bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /* This test is specifically crafted for tag-based modes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951)
	/* The kmalloc-64 cache will be used and the last 16 bytes will be the redzone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) bits = kzalloc(48, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
	/* Do the accesses past the 48 allocated bytes, but within the redzone. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) kasan_bitops_modify(test, BITS_PER_LONG, (void *)bits + 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) kasan_bitops_test_and_modify(test, BITS_PER_LONG + BITS_PER_BYTE, (void *)bits + 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) kfree(bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962)
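/*
 * Check that KASAN detects a second kfree_sensitive() call on an already
 * freed object. kfree_sensitive() zeroes the buffer before freeing it.
 */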
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) static void kmalloc_double_kzfree(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) size_t size = 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) ptr = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) kfree_sensitive(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) KUNIT_EXPECT_KASAN_FAIL(test, kfree_sensitive(ptr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
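/* Check that KASAN detects an out-of-bounds access past a vmalloc() area. */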
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) static void vmalloc_oob(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) void *area;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_KASAN_VMALLOC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) * We have to be careful not to hit the guard page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) * The MMU will catch that and crash us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) area = vmalloc(3000);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, area);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) KUNIT_EXPECT_KASAN_FAIL(test, ((volatile char *)area)[3100]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) vfree(area);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) * Check that the assigned pointer tag falls within the [KASAN_TAG_MIN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) * KASAN_TAG_KERNEL) range (note: excluding the match-all tag) for tag-based
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) * modes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) static void match_all_not_assigned(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) struct page *pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) int i, size, order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)
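	/* Check tags assigned to kmalloc() allocations. */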
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) for (i = 0; i < 256; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) size = (get_random_int() % 1024) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) ptr = kmalloc(size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) kfree(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)
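	/* Check tags assigned to page allocator allocations. */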
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) for (i = 0; i < 256; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) order = (get_random_int() % 4) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) pages = alloc_pages(GFP_KERNEL, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) ptr = page_address(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) KUNIT_EXPECT_GE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_MIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) KUNIT_EXPECT_LT(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) free_pages((unsigned long)ptr, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) /* Check that 0xff works as a match-all pointer tag for tag-based modes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) static void match_all_ptr_tag(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) u8 tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) ptr = kmalloc(128, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035)
	/* Back up the assigned tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) tag = get_tag(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) KUNIT_EXPECT_NE(test, tag, (u8)KASAN_TAG_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039)
	/* Reset the tag to 0xff (the match-all tag). */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) ptr = set_tag(ptr, KASAN_TAG_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /* This access shouldn't trigger a KASAN report. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) *ptr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) /* Recover the pointer tag and free. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) ptr = set_tag(ptr, tag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) kfree(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) /* Check that there are no match-all memory tags for tag-based modes. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) static void match_all_mem_tag(struct kunit *test)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) char *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int tag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) KASAN_TEST_NEEDS_CONFIG_OFF(test, CONFIG_KASAN_GENERIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) ptr = kmalloc(128, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) KUNIT_EXPECT_NE(test, (u8)get_tag(ptr), (u8)KASAN_TAG_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) /* For each possible tag value not matching the pointer tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) for (tag = KASAN_TAG_MIN; tag <= KASAN_TAG_KERNEL; tag++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) if (tag == get_tag(ptr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) /* Mark the first memory granule with the chosen memory tag. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) kasan_poison(ptr, KASAN_GRANULE_SIZE, (u8)tag, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) /* This access must cause a KASAN report. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) KUNIT_EXPECT_KASAN_FAIL(test, *ptr = 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) /* Recover the memory tag and free. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) kasan_poison(ptr, KASAN_GRANULE_SIZE, get_tag(ptr), false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) kfree(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) static struct kunit_case kasan_kunit_test_cases[] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) KUNIT_CASE(kmalloc_oob_right),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) KUNIT_CASE(kmalloc_oob_left),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) KUNIT_CASE(kmalloc_node_oob_right),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) KUNIT_CASE(kmalloc_pagealloc_oob_right),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) KUNIT_CASE(kmalloc_pagealloc_uaf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) KUNIT_CASE(kmalloc_pagealloc_invalid_free),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) KUNIT_CASE(pagealloc_oob_right),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) KUNIT_CASE(pagealloc_uaf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) KUNIT_CASE(kmalloc_large_oob_right),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) KUNIT_CASE(krealloc_more_oob),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) KUNIT_CASE(krealloc_less_oob),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) KUNIT_CASE(krealloc_pagealloc_more_oob),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) KUNIT_CASE(krealloc_pagealloc_less_oob),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) KUNIT_CASE(krealloc_uaf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) KUNIT_CASE(kmalloc_oob_16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) KUNIT_CASE(kmalloc_uaf_16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) KUNIT_CASE(kmalloc_oob_in_memset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) KUNIT_CASE(kmalloc_oob_memset_2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) KUNIT_CASE(kmalloc_oob_memset_4),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) KUNIT_CASE(kmalloc_oob_memset_8),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) KUNIT_CASE(kmalloc_oob_memset_16),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) KUNIT_CASE(kmalloc_memmove_invalid_size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) KUNIT_CASE(kmalloc_uaf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) KUNIT_CASE(kmalloc_uaf_memset),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) KUNIT_CASE(kmalloc_uaf2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) KUNIT_CASE(kfree_via_page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) KUNIT_CASE(kfree_via_phys),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) KUNIT_CASE(kmem_cache_oob),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) KUNIT_CASE(kmem_cache_accounted),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) KUNIT_CASE(kmem_cache_bulk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) KUNIT_CASE(kasan_global_oob),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) KUNIT_CASE(kasan_stack_oob),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) KUNIT_CASE(kasan_alloca_oob_left),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) KUNIT_CASE(kasan_alloca_oob_right),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) KUNIT_CASE(ksize_unpoisons_memory),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) KUNIT_CASE(ksize_uaf),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) KUNIT_CASE(kmem_cache_double_free),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) KUNIT_CASE(kmem_cache_invalid_free),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) KUNIT_CASE(kasan_memchr),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) KUNIT_CASE(kasan_memcmp),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) KUNIT_CASE(kasan_strings),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) KUNIT_CASE(kasan_bitops_generic),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) KUNIT_CASE(kasan_bitops_tags),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) KUNIT_CASE(kmalloc_double_kzfree),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) KUNIT_CASE(vmalloc_oob),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) KUNIT_CASE(match_all_not_assigned),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) KUNIT_CASE(match_all_ptr_tag),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) KUNIT_CASE(match_all_mem_tag),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) static struct kunit_suite kasan_kunit_test_suite = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) .name = "kasan",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) .init = kasan_test_init,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) .test_cases = kasan_kunit_test_cases,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) .exit = kasan_test_exit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) kunit_test_suite(kasan_kunit_test_suite);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) MODULE_LICENSE("GPL");