// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written and signed off for the Linux kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 * Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

#undef pr_fmt
#define pr_fmt(fmt) "Kernel/User page tables isolation: " fmt

/*
 * Backporting helper: define __GFP_NOTRACK to 0 on kernels that no
 * longer provide it (it was removed from mainline together with
 * kmemcheck), so this file builds either way.
 */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK 0
#endif

/*
 * Define the page-table levels we clone for user-space on 32-
 * and 64-bit.
 */
#ifdef CONFIG_X86_64
#define PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PMD
#else
#define PTI_LEVEL_KERNEL_IMAGE	PTI_CLONE_PTE
#endif
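
/*
 * Illustrative note (not in the original source): with 4k base pages, a
 * PMD-level clone shares whole last-level page-table pages covering 2MB
 * each, while a PTE-level clone copies individual 4k entries, the finer
 * granularity used on 32-bit.
 */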

static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}

static enum pti_mode {
	PTI_AUTO = 0,
	PTI_FORCE_OFF,
	PTI_FORCE_ON
} pti_mode;

void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	/* Assume mode is auto unless overridden. */
	pti_mode = PTI_AUTO;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0)  {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_mode = PTI_FORCE_OFF;
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_mode = PTI_FORCE_ON;
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4)) {
			pti_mode = PTI_AUTO;
			goto autosel;
		}
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_mode = PTI_FORCE_OFF;
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}
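
/*
 * Summary sketch (illustrative, not part of the original source) of how
 * common boot command lines map onto pti_mode above:
 *
 *	pti=off, nopti, mitigations=off -> PTI_FORCE_OFF (PTI stays off)
 *	pti=on                          -> PTI_FORCE_ON  (PTI always on)
 *	pti=auto or nothing             -> PTI_AUTO, enabled only when the
 *	                                   CPU has X86_BUG_CPU_MELTDOWN
 */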

pgd_t __pti_set_user_pgtbl(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *    EFI runtime mapping or something similar, and the kernel
	 *    may execute from it.
	 *  - we don't have NX support.
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
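
/*
 * Worked example (illustrative only): for an ordinary user mapping the
 * incoming PGD has _PAGE_USER and _PAGE_PRESENT set, so with NX support
 * the two copies installed by __pti_set_user_pgtbl() end up as, e.g.:
 *
 *	user copy:   _PAGE_PRESENT | _PAGE_RW | _PAGE_USER | ...
 *	kernel copy: _PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_NX | ...
 *
 * so a buggy return to usermode on the kernel CR3 faults instead of
 * executing user code.
 */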

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}

/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}

/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.  Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables.  It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (!new_pte_page)
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}
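
/*
 * Usage sketch (illustrative only, mirroring what pti_setup_vsyscall()
 * and pti_clone_user_shared() do below; "va" is a hypothetical
 * kernel-mapped address whose page should become user-visible, "pa" its
 * physical address):
 *
 *	pte_t *target_pte = pti_user_pagetable_walk_pte(va);
 *
 *	if (WARN_ON(!target_pte))
 *		return;		// allocation failed or large mapping hit
 *	*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
 */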

#ifdef CONFIG_X86_VSYSCALL_EMULATION
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif

enum pti_clone_level {
	PTI_CLONE_PMD,
	PTI_CLONE_PTE,
};

static void
pti_clone_pgtable(unsigned long start, unsigned long end,
		  enum pti_clone_level level)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end;) {
		pte_t *pte, *target_pte;
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;

		pud = pud_offset(p4d, addr);
		if (pud_none(*pud)) {
			WARN_ON_ONCE(addr & ~PUD_MASK);
			addr = round_up(addr + 1, PUD_SIZE);
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			WARN_ON_ONCE(addr & ~PMD_MASK);
			addr = round_up(addr + 1, PMD_SIZE);
			continue;
		}

		if (pmd_large(*pmd) || level == PTI_CLONE_PMD) {
			target_pmd = pti_user_pagetable_walk_pmd(addr);
			if (WARN_ON(!target_pmd))
				return;

			/*
			 * Only clone present PMDs. This ensures only setting
			 * _PAGE_GLOBAL on present PMDs. This should only be
			 * called on well-known addresses anyway, so a non-
			 * present PMD would be a surprise.
			 */
			if (WARN_ON(!(pmd_flags(*pmd) & _PAGE_PRESENT)))
				return;

			/*
			 * Setting 'target_pmd' below creates a mapping in both
			 * the user and kernel page tables. It is effectively
			 * global, so set it as global in both copies. Note:
			 * the X86_FEATURE_PGE check is not _required_ because
			 * the CPU ignores _PAGE_GLOBAL when PGE is not
			 * supported. The check keeps consistency with
			 * code that only sets this bit when supported.
			 */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pmd = pmd_set_flags(*pmd, _PAGE_GLOBAL);

			/*
			 * Copy the PMD. That is, the kernelmode and usermode
			 * tables will share the last-level page tables of this
			 * address range.
			 */
			*target_pmd = *pmd;

			addr += PMD_SIZE;

		} else if (level == PTI_CLONE_PTE) {

			/* Walk the page-table down to the pte level */
			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte)) {
				addr += PAGE_SIZE;
				continue;
			}

			/* Only clone present PTEs */
			if (WARN_ON(!(pte_flags(*pte) & _PAGE_PRESENT)))
				return;

			/* Allocate PTE in the user page-table */
			target_pte = pti_user_pagetable_walk_pte(addr);
			if (WARN_ON(!target_pte))
				return;

			/* Set GLOBAL bit in both PTEs */
			if (boot_cpu_has(X86_FEATURE_PGE))
				*pte = pte_set_flags(*pte, _PAGE_GLOBAL);

			/* Clone the PTE */
			*target_pte = *pte;

			addr += PAGE_SIZE;

		} else {
			BUG();
		}
	}
}
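
/*
 * Usage sketch (illustrative, with hypothetical section symbols): clone
 * a page-aligned range into the user page-tables at 4k granularity:
 *
 *	pti_clone_pgtable((unsigned long)__example_start,
 *			  (unsigned long)__example_end,
 *			  PTI_CLONE_PTE);
 *
 * Present entries in the range are mirrored and, when the CPU supports
 * PGE, marked _PAGE_GLOBAL in both copies.
 */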

#ifdef CONFIG_X86_64
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}

/*
 * Clone the CPU_ENTRY_AREA and associated data into the user space visible
 * page table.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned int cpu;

	pti_clone_p4d(CPU_ENTRY_AREA_BASE);

	for_each_possible_cpu(cpu) {
		/*
		 * The SYSCALL64 entry code needs to be able to find the
		 * thread stack and needs one word of scratch space in which
		 * to spill a register.  All of this lives in the TSS, in
		 * the sp1 and sp2 slots.
		 *
		 * This is done for all possible CPUs during boot to ensure
		 * that it's propagated to all mms.
		 */

		unsigned long va = (unsigned long)&per_cpu(cpu_tss_rw, cpu);
		phys_addr_t pa = per_cpu_ptr_to_phys((void *)va);
		pte_t *target_pte;

		target_pte = pti_user_pagetable_walk_pte(va);
		if (WARN_ON(!target_pte))
			return;

		*target_pte = pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL);
	}
}

#else /* CONFIG_X86_64 */

/*
 * On 32-bit PAE systems with 1GB of kernel address space there is only
 * one pgd/p4d for the whole kernel. Cloning that would map the whole
 * address space into the user page-tables, making PTI useless. So clone
 * the page-table on the PMD level to prevent that.
 */
static void __init pti_clone_user_shared(void)
{
	unsigned long start, end;

	start = CPU_ENTRY_AREA_BASE;
	end = start + (PAGE_SIZE * CPU_ENTRY_AREA_PAGES);

	pti_clone_pgtable(start, end, PTI_CLONE_PMD);
}
#endif /* CONFIG_X86_64 */

/*
 * Clone the ESPFIX P4D into the user space visible page table
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}

/*
 * Clone the populated PMDs of the entry text and force it RO.
 */
static void pti_clone_entry_text(void)
{
	pti_clone_pgtable((unsigned long) __entry_text_start,
			  (unsigned long) __entry_text_end,
			  PTI_CLONE_PMD);

	/*
	 * If CFI is enabled, also map jump tables, so the entry code can
	 * make indirect calls.
	 */
	if (IS_ENABLED(CONFIG_CFI_CLANG))
		pti_clone_pgtable((unsigned long) __cfi_jt_start,
				  (unsigned long) __cfi_jt_end,
				  PTI_CLONE_PMD);
}

/*
 * Global pages and PCIDs are both ways to make kernel TLB entries
 * live longer, reduce TLB misses and improve kernel performance.
 * But, leaving all kernel text Global makes it potentially accessible
 * to Meltdown-style attacks which make it trivial to find gadgets or
 * defeat KASLR.
 *
 * Only use global pages when it is really worth it.
 */
static inline bool pti_kernel_image_global_ok(void)
{
	/*
	 * Systems with PCIDs get little benefit from global
	 * kernel text and are not worth the downsides.
	 */
	if (cpu_feature_enabled(X86_FEATURE_PCID))
		return false;

	/*
	 * Only do global kernel image for pti=auto. Do the most
	 * secure thing (not global) if pti=on is specified.
	 */
	if (pti_mode != PTI_AUTO)
		return false;

	/*
	 * K8 may not tolerate the cleared _PAGE_RW on the userspace
	 * global kernel image pages. Do the safe thing (disable
	 * global kernel image). This is unlikely to ever be
	 * noticed because PTI is disabled by default on AMD CPUs.
	 */
	if (boot_cpu_has(X86_FEATURE_K8))
		return false;

	/*
	 * RANDSTRUCT derives its hardening benefits from the
	 * attacker's lack of knowledge about the layout of kernel
	 * data structures.  Keep the kernel image non-global in
	 * cases where RANDSTRUCT is in use to help keep the layout a
	 * secret.
	 */
	if (IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT))
		return false;

	return true;
}

/*
 * For some configurations, map all of kernel text into the user page
 * tables.  This reduces TLB misses, especially on non-PCID systems.
 */
static void pti_clone_kernel_text(void)
{
	/*
	 * rodata is part of the kernel image and is normally
	 * readable on the filesystem or on the web.  But do not
	 * clone the areas past rodata; they might contain secrets.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end_clone  = (unsigned long)__end_rodata_aligned;
	unsigned long end_global = PFN_ALIGN((unsigned long)_etext);

	if (!pti_kernel_image_global_ok())
		return;

	pr_debug("mapping partial kernel image into user address space\n");

	/*
	 * Note that this will undo _some_ of the work that
	 * pti_set_kernel_image_nonglobal() did to clear the
	 * global bit.
	 */
	pti_clone_pgtable(start, end_clone, PTI_LEVEL_KERNEL_IMAGE);

	/*
	 * pti_clone_pgtable() will set the global bit in any PMDs
	 * that it clones, but we also need to get any PTEs in
	 * the last level for areas that are not huge-page-aligned.
	 */

	/* Set the global bit for normal non-__init kernel text: */
	set_memory_global(start, (end_global - start) >> PAGE_SHIFT);
}

static void pti_set_kernel_image_nonglobal(void)
{
	/*
	 * The identity map is created with PMDs, regardless of the
	 * actual length of the kernel.  We need to clear
	 * _PAGE_GLOBAL up to a PMD boundary, not just to the end
	 * of the image.
	 */
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = ALIGN((unsigned long)_end, PMD_PAGE_SIZE);

	/*
	 * This clears _PAGE_GLOBAL from the entire kernel image.
	 * pti_clone_kernel_text() may put _PAGE_GLOBAL back for
	 * areas that are mapped to userspace.
	 */
	set_memory_nonglobal(start, (end - start) >> PAGE_SHIFT);
}
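
/*
 * Worked example with made-up addresses: if _text is 0xffffffff81000000
 * and _end is 0xffffffff82801000, ALIGN(_end, PMD_PAGE_SIZE) rounds the
 * end up to the next 2MB boundary, 0xffffffff82a00000, so the trailing
 * partially-used PMD loses _PAGE_GLOBAL as well.
 */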

/*
 * Initialize kernel page table isolation
 */
void __init pti_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

#ifdef CONFIG_X86_32
	/*
	 * We would like to check X86_FEATURE_PCID here, but the init
	 * code clears that flag on 32-bit kernels because the feature
	 * is not supported there anyway. To print the warning we have
	 * to query CPUID directly instead.
	 */
	if (cpuid_ecx(0x1) & BIT(17)) {
		/* Use printk to work around pr_fmt() */
		printk(KERN_WARNING "\n");
		printk(KERN_WARNING "************************************************************\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** You are using 32-bit PTI on a 64-bit PCID-capable CPU. **\n");
		printk(KERN_WARNING "** Your performance will increase dramatically if you     **\n");
		printk(KERN_WARNING "** switch to a 64-bit kernel!                             **\n");
		printk(KERN_WARNING "**                                                        **\n");
		printk(KERN_WARNING "** WARNING! WARNING! WARNING! WARNING! WARNING! WARNING!  **\n");
		printk(KERN_WARNING "************************************************************\n");
	}
#endif

	pti_clone_user_shared();

	/* Undo all global bits from the init pagetables in head_64.S: */
	pti_set_kernel_image_nonglobal();
	/* Replace some of the global bits just for shared entry text: */
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}

/*
 * Finalize the kernel mappings in the userspace page-table. Some of the
 * mappings for the kernel image might have changed since pti_init()
 * cloned them. This is because parts of the kernel image have been
 * mapped RO and/or NX.  These changes need to be cloned again to the
 * userspace page-table.
 */
void pti_finalize(void)
{
	if (!boot_cpu_has(X86_FEATURE_PTI))
		return;
	/*
	 * We need to clone everything (again) that maps parts of the
	 * kernel image.
	 */
	pti_clone_entry_text();
	pti_clone_kernel_text();

	debug_checkwx_user();
}