Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/* ----------------------------------------------------------------------- *
 *
 *   Copyright 2014 Intel Corporation; author: H. Peter Anvin
 *
 * ----------------------------------------------------------------------- */

/*
 * The IRET instruction, when returning to a 16-bit segment, only
 * restores the bottom 16 bits of the user space stack pointer.  This
 * causes some 16-bit software to break, but it also leaks kernel state
 * to user space.
 *
 * This file works around the problem by creating percpu "ministacks",
 * each of which is mapped 2^16 times, 64K apart.  When we detect that
 * the return SS is on the LDT, we copy the IRET frame to the ministack
 * and use the relevant alias to return to userspace.  The ministacks
 * are mapped readonly, so if IRET faults we promote #GP to #DF, which
 * is an IST vector and thus has its own stack; we then do the fixup in
 * the #DF handler.
 *
 * This file sets up the ministacks and the related page tables.  The
 * actual ministack invocation is in entry_64.S.
 */

#include <linux/init.h>
#include <linux/init_task.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/espfix.h>

/*
 * Note: we only need 6*8 = 48 bytes for the espfix stack, but round
 * it up to a cache line to avoid unnecessary sharing.
 */
#define ESPFIX_STACK_SIZE	(8*8UL)
#define ESPFIX_STACKS_PER_PAGE	(PAGE_SIZE/ESPFIX_STACK_SIZE)
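
/*
 * For illustration (assuming the usual 4K x86-64 page size): the six
 * 8-byte slots are the five-word IRET frame (RIP, CS, RFLAGS, RSP, SS)
 * plus one slot the copy code in entry_64.S uses to stash a scratch
 * register.  Rounded up to 64 bytes, each page then holds
 * ESPFIX_STACKS_PER_PAGE = 4096/64 = 64 ministacks.
 */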

/* There is address space for how many espfix pages? */
#define ESPFIX_PAGE_SPACE	(1UL << (P4D_SHIFT-PAGE_SHIFT-16))

#define ESPFIX_MAX_CPUS		(ESPFIX_STACKS_PER_PAGE * ESPFIX_PAGE_SPACE)
#if CONFIG_NR_CPUS > ESPFIX_MAX_CPUS
# error "Need more virtual address space for the ESPFIX hack"
#endif
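
/*
 * For scale, assuming 4K pages and the x86-64 value P4D_SHIFT == 39:
 * each ministack page consumes 2^16 * PAGE_SIZE = 256MB of the alias
 * region, so ESPFIX_PAGE_SPACE = 1 << (39 - 12 - 16) = 2048 pages and
 * ESPFIX_MAX_CPUS = 2048 * 64 = 131072.
 */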

#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)

/*
 * espfix_stack holds the *bottom* (read-only alias) address of each
 * CPU's espfix stack; espfix_waddr holds the kernel-writable address
 * of the same memory.
 */
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_waddr);

/* Initialization mutex - should this be a spinlock? */
static DEFINE_MUTEX(espfix_init_mutex);

/* Page allocation bitmap - each page serves ESPFIX_STACKS_PER_PAGE CPUs */
#define ESPFIX_MAX_PAGES  DIV_ROUND_UP(CONFIG_NR_CPUS, ESPFIX_STACKS_PER_PAGE)
static void *espfix_pages[ESPFIX_MAX_PAGES];

static __page_aligned_bss pud_t espfix_pud_page[PTRS_PER_PUD]
	__aligned(PAGE_SIZE);

static unsigned int page_random, slot_random;

/*
 * This returns the bottom address of the espfix stack for a specific CPU.
 * The math allows for a non-power-of-two ESPFIX_STACK_SIZE, in which case
 * we have to account for some amount of padding at the end of each page.
 */
static inline unsigned long espfix_base_addr(unsigned int cpu)
{
	unsigned long page, slot;
	unsigned long addr;

	page = (cpu / ESPFIX_STACKS_PER_PAGE) ^ page_random;
	slot = (cpu + slot_random) % ESPFIX_STACKS_PER_PAGE;
	addr = (page << PAGE_SHIFT) + (slot * ESPFIX_STACK_SIZE);
	addr = (addr & 0xffffUL) | ((addr & ~0xffffUL) << 16);
	addr += ESPFIX_BASE_ADDR;
	return addr;
}
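
/*
 * Worked example (illustrative only; 4K pages, randomization ignored):
 * for cpu == 1030, page = 1030/64 = 16 and slot = 1030%64 = 6, so the
 * linear offset is 16*4096 + 6*64 = 0x10180.  The interleave keeps the
 * low 16 bits (0x0180) in place and shifts the rest up by 16, giving
 * ESPFIX_BASE_ADDR + 0x100000180.  Bits 16..31 of the result are always
 * zero, which is what lets the entry_64.S code OR arbitrary values into
 * that bit range and still hit a 64K-apart alias of the same page.
 */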

#define PTE_STRIDE        (65536/PAGE_SIZE)
#define ESPFIX_PTE_CLONES (PTRS_PER_PTE/PTE_STRIDE)
#define ESPFIX_PMD_CLONES PTRS_PER_PMD
#define ESPFIX_PUD_CLONES (65536/(ESPFIX_PTE_CLONES*ESPFIX_PMD_CLONES))
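/*
 * With 4K pages and 512-entry tables these work out to 32 PTE clones
 * (one per 64K inside a single pte page), 512 PMD clones and 4 PUD
 * clones: 32 * 512 * 4 = 65536 aliases in total, i.e. each ministack
 * page repeats every 64K across a 4GB window, matching the "mapped 2^16
 * times, 64K apart" description at the top of the file.
 */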

#define PGTABLE_PROT	  ((_KERNPG_TABLE & ~_PAGE_RW) | _PAGE_NX)
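/*
 * PGTABLE_PROT is used for the intermediate (pud/pmd) entries below:
 * normal kernel page-table bits with _PAGE_RW cleared and _PAGE_NX set,
 * so the espfix range ends up read-only and non-executable regardless
 * of the leaf PTE bits.
 */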

static void init_espfix_random(void)
{
	unsigned long rand;

	/*
	 * This is run before the entropy pools are initialized,
	 * but this is hopefully better than nothing.
	 */
	if (!arch_get_random_long(&rand)) {
		/* The constant is an arbitrarily chosen large prime */
		rand = rdtsc();
		rand *= 0xc345c6b72fd16123UL;
	}

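	/*
	 * Both values are derived from the single random sample above:
	 * slot_random rotates which 64-byte slot a CPU lands in within
	 * its page, and page_random XOR-scrambles the page order.  The
	 * mask is valid because ESPFIX_PAGE_SPACE is a power of two.
	 */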
	slot_random = rand % ESPFIX_STACKS_PER_PAGE;
	page_random = (rand / ESPFIX_STACKS_PER_PAGE)
		& (ESPFIX_PAGE_SPACE - 1);
}

void __init init_espfix_bsp(void)
{
	pgd_t *pgd;
	p4d_t *p4d;

	/* Install the espfix pud into the kernel page directory */
	pgd = &init_top_pgt[pgd_index(ESPFIX_BASE_ADDR)];
	p4d = p4d_alloc(&init_mm, pgd, ESPFIX_BASE_ADDR);
	p4d_populate(&init_mm, p4d, espfix_pud_page);

	/* Randomize the locations */
	init_espfix_random();

	/* The rest is the same as for any other processor */
	init_espfix_ap(0);
}

void init_espfix_ap(int cpu)
{
	unsigned int page;
	unsigned long addr;
	pud_t pud, *pud_p;
	pmd_t pmd, *pmd_p;
	pte_t pte, *pte_p;
	int n, node;
	void *stack_page;
	pteval_t ptemask;

	/* We only have to do this once... */
	if (likely(per_cpu(espfix_stack, cpu)))
		return;		/* Already initialized */

	addr = espfix_base_addr(cpu);
	page = cpu/ESPFIX_STACKS_PER_PAGE;

	/* Did another CPU already set this up? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (likely(stack_page))
		goto done;

	mutex_lock(&espfix_init_mutex);

	/* Did we race on the lock? */
	stack_page = READ_ONCE(espfix_pages[page]);
	if (stack_page)
		goto unlock_done;

	node = cpu_to_node(cpu);
	ptemask = __supported_pte_mask;

	pud_p = &espfix_pud_page[pud_index(addr)];
	pud = *pud_p;
	if (!pud_present(pud)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pmd_p = (pmd_t *)page_address(page);
		pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PUD_CLONES; n++)
			set_pud(&pud_p[n], pud);
	}

	pmd_p = pmd_offset(&pud, addr);
	pmd = *pmd_p;
	if (!pmd_present(pmd)) {
		struct page *page = alloc_pages_node(node, PGALLOC_GFP, 0);

		pte_p = (pte_t *)page_address(page);
		pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
		paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
		for (n = 0; n < ESPFIX_PMD_CLONES; n++)
			set_pmd(&pmd_p[n], pmd);
	}

	pte_p = pte_offset_kernel(&pmd, addr);
	stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
	/*
	 * __PAGE_KERNEL_* includes _PAGE_GLOBAL, which we want since
	 * this is mapped to userspace.
	 */
	pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));
	for (n = 0; n < ESPFIX_PTE_CLONES; n++)
		set_pte(&pte_p[n*PTE_STRIDE], pte);
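
	/*
	 * Only ESPFIX_PTE_CLONES entries are written here because 32
	 * clones are all that fit in the 2M covered by one pmd entry;
	 * the rest of the 2^16 aliases fall out of the pmd and pud
	 * cloning above, where every cloned entry points back at the
	 * same lower-level table.
	 */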

	/* Job is done for this CPU and any CPU which shares this page */
	WRITE_ONCE(espfix_pages[page], stack_page);

unlock_done:
	mutex_unlock(&espfix_init_mutex);
done:
	per_cpu(espfix_stack, cpu) = addr;
	per_cpu(espfix_waddr, cpu) = (unsigned long)stack_page
				      + (addr & ~PAGE_MASK);
}