^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) * Xtensa KASAN shadow map initialization
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * This file is subject to the terms and conditions of the GNU General Public
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * License. See the file "COPYING" in the main directory of this archive
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * for more details.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * Copyright (C) 2017 Cadence Design Systems Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/memblock.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/init_task.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <linux/kasan.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm/initialize_mmu.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) #include <asm/traps.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) void __init kasan_early_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) unsigned long vaddr = KASAN_SHADOW_START;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) pmd_t *pmd = pmd_off_k(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) for (i = 0; i < PTRS_PER_PTE; ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) set_pte(kasan_early_shadow_pte + i,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) mk_pte(virt_to_page(kasan_early_shadow_page),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) PAGE_KERNEL));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) BUG_ON(!pmd_none(*pmd));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) early_trap_init();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
/*
 * Back the shadow range [start, end) with freshly allocated, writable
 * pages, replacing the early shadow mapping installed by
 * kasan_early_init().  start/end must be PMD-aligned shadow addresses.
 *
 * All pte tables for the range come from a single memblock allocation;
 * the shadow data pages themselves are allocated one at a time below.
 */
static void __init populate(void *start, void *end)
{
	unsigned long n_pages = (end - start) / PAGE_SIZE;
	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
	unsigned long i, j;
	unsigned long vaddr = (unsigned long)start;
	pmd_t *pmd = pmd_off_k(vaddr);
	/* One contiguous array of pte entries for all pmds in the range. */
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	pr_debug("%s: %p - %p\n", __func__, start, end);

	/* Allocate a backing page for every shadow pte in the range. */
	for (i = j = 0; i < n_pmds; ++i) {
		int k;

		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
			/*
			 * MEMBLOCK_ALLOC_ANYWHERE: shadow pages are only
			 * accessed through the mapping built here, so
			 * presumably any physical page is acceptable --
			 * NOTE(review): confirm against xtensa KSEG layout.
			 */
			phys_addr_t phys =
				memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
							  0,
							  MEMBLOCK_ALLOC_ANYWHERE);

			if (!phys)
				panic("Failed to allocate page table page\n");

			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
		}
	}

	/*
	 * Only after all ptes are populated, hook each PTRS_PER_PTE-sized
	 * slice of the pte array under its pmd, replacing the early
	 * shadow pte table.
	 */
	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
		set_pmd(pmd + i, __pmd((unsigned long)pte));

	/*
	 * Flush stale TLB entries for the early shadow mapping before
	 * writing through the new one; then zero the fresh shadow pages
	 * (all-zero shadow == "fully accessible").
	 */
	local_flush_tlb_all();
	memset(start, 0, end - start);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74)
/*
 * Finish KASAN initialization: give the VMALLOC..KSEG shadow real
 * backing pages, write-protect the shared early shadow page, and
 * enable error reporting.
 */
void __init kasan_init(void)
{
	int i;

	/*
	 * Sanity-check the compile-time shadow layout: the shadow offset
	 * must place KASAN_START_VADDR's shadow at KASAN_SHADOW_START,
	 * and the vmalloc area must lie inside the sanitized range.
	 */
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);

	/*
	 * Replace shadow map pages that cover addresses from VMALLOC area
	 * start to the end of KSEG with clean writable pages.
	 */
	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));

	/*
	 * Write protect kasan_early_shadow_page and zero-initialize it again.
	 * (It was writable during early boot, so shadow updates may have
	 * dirtied it; it must read as all zeroes == "accessible".)
	 */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL_RO));

	/* Drop stale writable TLB entries before touching the page. */
	local_flush_tlb_all();
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* At this point kasan is fully initialized. Enable error messages. */
	current->kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}