// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 */

#include <linux/export.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kfence.h>	/* for is_kfence_address() used below */
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

depot_stack_handle_t kasan_save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

void kasan_set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = kasan_save_stack(flags);
}

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
/*
 * The report code suppresses reports for a task whenever
 * current->kasan_depth is non-zero, so the two calls below must always
 * be balanced to bring the depth back to zero.
 */
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
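
/*
 * Illustrative only: a caller that must touch memory KASAN considers
 * poisoned brackets the access with the pair above, the way kmemleak
 * does when checksumming objects:
 *
 *	kasan_disable_current();
 *	csum = crc32(0, kasan_reset_tag((void *)object->pointer), size);
 *	kasan_enable_current();
 *
 * Reports stay suppressed for the task in between.
 */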

void __kasan_unpoison_range(const void *address, size_t size)
{
	kasan_unpoison(address, size, false);
}

#ifdef CONFIG_KASAN_STACK
/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	void *base = task_stack_page(task);

	kasan_unpoison(base, THREAD_SIZE, false);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison(base, watermark - base, false);
}
#endif /* CONFIG_KASAN_STACK */
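
/*
 * Worked example (illustrative, assuming THREAD_SIZE == 16KB): a watermark
 * of 0xffffc90000347e38 masked with ~(THREAD_SIZE - 1) gives the stack base
 * 0xffffc90000344000, and the bytes in [base, watermark) are unpoisoned.
 * This relies on task stacks being THREAD_SIZE-aligned.
 */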

/*
 * Only allow cache merging when stack collection is disabled and no metadata
 * is present.
 */
slab_flags_t __kasan_never_merge(void)
{
	if (kasan_stack_collection_enabled())
		return SLAB_KASAN;
	return 0;
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = kasan_random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
	if (likely(!PageHighMem(page)))
		kasan_poison(page_address(page), PAGE_SIZE << order,
			     KASAN_FREE_PAGE, init);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}
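
/*
 * Worked example (illustrative): a 128-byte object falls into the
 * "object_size <= 512 - 64" bucket and gets a 64-byte redzone, so
 * __kasan_cache_create() below aims for a 192-byte slot; a 3000-byte
 * object gets only 128 bytes, i.e. redzones grow sub-linearly with size.
 */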

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags)
{
	unsigned int ok_size;
	unsigned int optimal_size;

	/*
	 * SLAB_KASAN is used to mark caches as ones that are sanitized by
	 * KASAN. Currently this flag is used in two places:
	 * 1. In slab_ksize() when calculating the size of the accessible
	 *    memory within the object.
	 * 2. In slab_common.c to prevent merging of sanitized caches.
	 */
	*flags |= SLAB_KASAN;

	if (!kasan_stack_collection_enabled())
		return;

	ok_size = *size;

	/* Add alloc meta into redzone. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/*
	 * If alloc meta doesn't fit, don't add it.
	 * This can only happen with SLAB, as it has KMALLOC_MAX_SIZE equal
	 * to KMALLOC_MAX_CACHE_SIZE and doesn't fall back to page_alloc for
	 * larger sizes.
	 */
	if (*size > KMALLOC_MAX_SIZE) {
		cache->kasan_info.alloc_meta_offset = 0;
		*size = ok_size;
		/* Continue, since free meta might still fit. */
	}

	/* Only the generic mode uses free meta or flexible redzones. */
	if (!IS_ENABLED(CONFIG_KASAN_GENERIC)) {
		cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
		return;
	}

	/*
	 * Add free meta into redzone when it's not possible to store
	 * it in the object. This is the case when:
	 * 1. Object is SLAB_TYPESAFE_BY_RCU, which means that it can
	 *    be touched after it was freed, or
	 * 2. Object has a constructor, which means it's expected to
	 *    retain its content until the next allocation, or
	 * 3. Object is too small.
	 * Otherwise cache->kasan_info.free_meta_offset = 0 is implied.
	 */
	if ((cache->flags & SLAB_TYPESAFE_BY_RCU) || cache->ctor ||
	    cache->object_size < sizeof(struct kasan_free_meta)) {
		ok_size = *size;

		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);

		/* If free meta doesn't fit, don't add it. */
		if (*size > KMALLOC_MAX_SIZE) {
			cache->kasan_info.free_meta_offset = KASAN_NO_FREE_META;
			*size = ok_size;
		}
	}

	/* Calculate size with optimal redzone. */
	optimal_size = cache->object_size + optimal_redzone(cache->object_size);
	/* Limit it with KMALLOC_MAX_SIZE (relevant for SLAB only). */
	if (optimal_size > KMALLOC_MAX_SIZE)
		optimal_size = KMALLOC_MAX_SIZE;
	/* Use optimal size if the size with added metas is not large enough. */
	if (*size < optimal_size)
		*size = optimal_size;
}
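
/*
 * Resulting slot layout (a sketch; which metas exist depends on the checks
 * above):
 *
 *	| object (object_size) | alloc meta? | free meta? | redzone... |
 *
 * An alloc_meta_offset of 0 means no alloc meta was added; a
 * free_meta_offset of 0 means the free meta overlaps the freed object's
 * bytes, while KASAN_NO_FREE_META means no free meta is stored at all.
 */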

void __kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	cache->kasan_info.is_kmalloc = true;
}

size_t __kasan_metadata_size(struct kmem_cache *cache)
{
	if (!kasan_stack_collection_enabled())
		return 0;
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
					      const void *object)
{
	if (!cache->kasan_info.alloc_meta_offset)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.alloc_meta_offset;
}

#ifdef CONFIG_KASAN_GENERIC
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
					    const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	if (cache->kasan_info.free_meta_offset == KASAN_NO_FREE_META)
		return NULL;
	return kasan_reset_tag(object) + cache->kasan_info.free_meta_offset;
}
#endif

void __kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE, false);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison(object, cache->object_size, false);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_KMALLOC_REDZONE, false);
}
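
/*
 * Worked example (illustrative, generic mode, KASAN_GRANULE_SIZE == 8):
 * for object_size == 20, round_up(20, 8) == 24, so the three granules
 * covering the object are marked KASAN_KMALLOC_REDZONE here; byte-precise
 * poisoning of a partially used last granule is done later, in
 * ____kasan_kmalloc().
 */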

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next
 *    to each other get different tags.
 */
static inline u8 assign_tag(struct kmem_cache *cache,
			    const void *object, bool init)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return 0xff;

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : kasan_random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? kasan_random_tag() : get_tag(object);
#endif
}
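
/*
 * Illustrative only: with CONFIG_SLAB, the objects at indexes 0, 1, 2 of a
 * slab get the tags 0x00, 0x01, 0x02 (the index truncated to u8), so
 * neighboring objects always differ and a linear overflow trips the tag
 * check; with CONFIG_SLUB, a ctor/RCU object keeps the random tag it was
 * given when its slab was created.
 */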

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object)
{
	struct kasan_alloc_meta *alloc_meta;

	if (kasan_stack_collection_enabled()) {
		alloc_meta = kasan_get_alloc_meta(cache, object);
		if (alloc_meta)
			__memset(alloc_meta, 0, sizeof(*alloc_meta));
	}

	/* Tag is ignored in set_tag() without CONFIG_KASAN_SW/HW_TAGS */
	object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
				       unsigned long ip, bool quarantine, bool init)
{
	u8 tag;
	void *tagged_object;

	tag = get_tag(object);
	tagged_object = object;
	object = kasan_reset_tag(object);

	if (is_kfence_address(object))
		return false;

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period. */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	if (!kasan_byte_accessible(tagged_object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
		     KASAN_KMALLOC_FREE, init);

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine)
		return false;

	if (kasan_stack_collection_enabled())
		kasan_set_free_info(cache, object, tag);

	return kasan_quarantine_put(cache, object);
}

bool __kasan_slab_free(struct kmem_cache *cache, void *object,
		       unsigned long ip, bool init)
{
	return ____kasan_slab_free(cache, object, ip, true, init);
}

static inline bool ____kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr))) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	if (!kasan_byte_accessible(ptr)) {
		kasan_report_invalid_free(ptr, ip);
		return true;
	}

	/*
	 * The object will be poisoned by kasan_free_pages() or
	 * kasan_slab_free_mempool().
	 */

	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip)
{
	____kasan_kfree_large(ptr, ip);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	/*
	 * Even though this function is only called for kmem_cache_alloc and
	 * kmalloc backed mempool allocations, those allocations can still be
	 * !PageSlab() when the size provided to kmalloc is larger than
	 * KMALLOC_MAX_CACHE_SIZE, and kmalloc falls back onto page_alloc.
	 */
	if (unlikely(!PageSlab(page))) {
		if (____kasan_kfree_large(ptr, ip))
			return;
		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
	} else {
		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
	}
}

static void set_alloc_info(struct kmem_cache *cache, void *object,
			   gfp_t flags, bool is_kmalloc)
{
	struct kasan_alloc_meta *alloc_meta;

	/* Don't save alloc info for kmalloc caches in kasan_slab_alloc(). */
	if (cache->kasan_info.is_kmalloc && !is_kmalloc)
		return;

	alloc_meta = kasan_get_alloc_meta(cache, object);
	if (alloc_meta)
		kasan_set_track(&alloc_meta->alloc_track, flags);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *cache,
				       void *object, gfp_t flags, bool init)
{
	u8 tag;
	void *tagged_object;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(object))
		return (void *)object;

	/*
	 * Generate and assign random tag for tag-based modes.
	 * Tag is ignored in set_tag() for the generic mode.
	 */
	tag = assign_tag(cache, object, false);
	tagged_object = set_tag(object, tag);

	/*
	 * Unpoison the whole object.
	 * For kmalloc() allocations, kasan_kmalloc() will do precise poisoning.
	 */
	kasan_unpoison(tagged_object, cache->object_size, init);

	/* Save alloc info (if possible) for non-kmalloc() allocations. */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, false);

	return tagged_object;
}

static inline void *____kasan_kmalloc(struct kmem_cache *cache,
				      const void *object, size_t size, gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	if (is_kfence_address(kasan_reset_tag(object)))
		return (void *)object;

	/*
	 * The object has already been unpoisoned by kasan_slab_alloc() for
	 * kmalloc() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule((void *)object, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(object + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = round_up((unsigned long)(object + cache->object_size),
			       KASAN_GRANULE_SIZE);
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_KMALLOC_REDZONE, false);

	/*
	 * Save alloc info (if possible) for kmalloc() allocations.
	 * This also rewrites the alloc info when called from kasan_krealloc().
	 */
	if (kasan_stack_collection_enabled())
		set_alloc_info(cache, (void *)object, flags, true);

	/* Keep the tag that was set by kasan_slab_alloc(). */
	return (void *)object;
}
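
/*
 * Worked example (illustrative, generic mode, KASAN_GRANULE_SIZE == 8):
 * kmalloc(20) is served from the 32-byte cache. __kasan_slab_alloc() first
 * unpoisons all 32 bytes; the code above then poisons bytes 20..23
 * byte-precisely via kasan_poison_last_granule() (the shadow byte holds
 * 20 % 8 == 4) and bytes 24..31 as whole-granule KASAN_KMALLOC_REDZONE.
 */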

void * __must_check __kasan_kmalloc(struct kmem_cache *cache, const void *object,
				    size_t size, gfp_t flags)
{
	return ____kasan_kmalloc(cache, object, size, flags);
}
EXPORT_SYMBOL(__kasan_kmalloc);

void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
					  gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		kasan_quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	/*
	 * The object has already been unpoisoned by kasan_alloc_pages() for
	 * alloc_pages() or by kasan_krealloc() for krealloc().
	 */

	/*
	 * The redzone has byte-level precision for the generic mode.
	 * Partially poison the last object granule to cover the unaligned
	 * part of the redzone.
	 */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		kasan_poison_last_granule(ptr, size);

	/* Poison the aligned part of the redzone. */
	redzone_start = round_up((unsigned long)(ptr + size),
				 KASAN_GRANULE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(virt_to_page(ptr));
	kasan_poison((void *)redzone_start, redzone_end - redzone_start,
		     KASAN_PAGE_REDZONE, false);

	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	/*
	 * Unpoison the object's data.
	 * Part of it might already have been unpoisoned, but it's unknown
	 * how big that part is.
	 */
	kasan_unpoison(object, size, false);

	page = virt_to_head_page(object);

	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
	if (unlikely(!PageSlab(page)))
		return __kasan_kmalloc_large(object, size, flags);
	else
		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
}

bool __kasan_check_byte(const void *address, unsigned long ip)
{
	if (!kasan_byte_accessible(address)) {
		kasan_report((unsigned long)address, 1, false, ip);
		return false;
	}
	return true;
}
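
/*
 * Illustrative only: a typical caller is ksize(), which probes one byte to
 * confirm the pointer still refers to a live allocation before unpoisoning
 * the full block, along the lines of:
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
 *		return 0;
 */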