Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Hibernate support specific for ARM64
 *
 * Derived from work on ARM hibernation support by:
 *
 * Ubuntu project, hibernation support for mach-dove
 * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
 * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
 *  https://lkml.org/lkml/2010/6/18/4
 *  https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
 *  https://patchwork.kernel.org/patch/96442/
 *
 * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
 */
#define pr_fmt(x) "hibernate: " x
#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/utsname.h>

#include <asm/barrier.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/irqflags.h>
#include <asm/kexec.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/pgalloc.h>
#include <asm/pgtable-hwdef.h>
#include <asm/sections.h>
#include <asm/smp.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

/*
 * Hibernate core relies on this value being 0 on resume, and marks it
 * __nosavedata assuming it will keep the resume kernel's '0' value. This
 * doesn't happen with KASLR.
 *
 * defined as "__visible int in_suspend __nosavedata" in
 * kernel/power/hibernate.c
 */
extern int in_suspend;

/* Do we need to reset el2? */
#define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode())
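/*
 * That is: el2 only needs re-initialising on resume when the kernel booted
 * at EL2 but runs at EL1 (non-VHE). If we booted at EL1, or run at EL2 with
 * VHE, there is nothing to reset.
 */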

/* temporary el2 vectors in the __hibernate_exit_text section. */
extern char hibernate_el2_vectors[];

/* hyp-stub vectors, used to restore el2 during resume from hibernate. */
extern char __hyp_stub_vectors[];

/*
 * The logical cpu number we should resume on, initialised to a non-cpu
 * number.
 */
static int sleep_cpu = -EINVAL;

/*
 * Values that may not change over hibernate/resume. We put the build number
 * and date in here so that we guarantee not to resume with a different
 * kernel.
 */
struct arch_hibernate_hdr_invariants {
	char		uts_version[__NEW_UTS_LEN + 1];
};
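/* uts_version is e.g. "#1 SMP PREEMPT Sat Oct 28 12:00:06 +03 2023". */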

/* These values need to be known across a hibernate/restore. */
static struct arch_hibernate_hdr {
	struct arch_hibernate_hdr_invariants invariants;

	/* These are needed to find the relocated kernel if built with kaslr */
	phys_addr_t	ttbr1_el1;
	void		(*reenter_kernel)(void);

	/*
	 * We need to know where the __hyp_stub_vectors are after restore to
	 * re-configure el2.
	 */
	phys_addr_t	__hyp_stub_vectors;

	u64		sleep_cpu_mpidr;
} resume_hdr;

static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i)
{
	memset(i, 0, sizeof(*i));
	memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version));
}

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin);
	unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1);

	return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) ||
		crash_is_nosave(pfn);
}

void notrace save_processor_state(void)
{
	WARN_ON(num_online_cpus() != 1);
}

void notrace restore_processor_state(void)
{
}

int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct arch_hibernate_hdr *hdr = addr;

	if (max_size < sizeof(*hdr))
		return -EOVERFLOW;

	arch_hdr_invariants(&hdr->invariants);
	hdr->ttbr1_el1		= __pa_symbol(swapper_pg_dir);
	hdr->reenter_kernel	= _cpu_resume;

	/* We can't use __hyp_get_vectors() because kvm may still be loaded */
	if (el2_reset_needed())
		hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors);
	else
		hdr->__hyp_stub_vectors = 0;

	/* Save the mpidr of the cpu we called cpu_suspend() on... */
	if (sleep_cpu < 0) {
		pr_err("Failing to hibernate on an unknown CPU.\n");
		return -ENODEV;
	}
	hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu);
	pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_save);

int arch_hibernation_header_restore(void *addr)
{
	int ret;
	struct arch_hibernate_hdr_invariants invariants;
	struct arch_hibernate_hdr *hdr = addr;

	arch_hdr_invariants(&invariants);
	if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) {
		pr_crit("Hibernate image not generated by this kernel!\n");
		return -EINVAL;
	}

	sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr);
	pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu,
		hdr->sleep_cpu_mpidr);
	if (sleep_cpu < 0) {
		pr_crit("Hibernated on a CPU not known to this kernel!\n");
		sleep_cpu = -EINVAL;
		return -EINVAL;
	}

	ret = bringup_hibernate_cpu(sleep_cpu);
	if (ret) {
		sleep_cpu = -EINVAL;
		return ret;
	}

	resume_hdr = *hdr;

	return 0;
}
EXPORT_SYMBOL(arch_hibernation_header_restore);

static int trans_pgd_map_page(pgd_t *trans_pgd, void *page,
		       unsigned long dst_addr,
		       pgprot_t pgprot)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_pgd(trans_pgd, dst_addr);
	if (pgd_none(READ_ONCE(*pgdp))) {
		pudp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pudp)
			return -ENOMEM;
		pgd_populate(&init_mm, pgdp, pudp);
	}

	p4dp = p4d_offset(pgdp, dst_addr);
	if (p4d_none(READ_ONCE(*p4dp))) {
		pudp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pudp)
			return -ENOMEM;
		p4d_populate(&init_mm, p4dp, pudp);
	}

	pudp = pud_offset(p4dp, dst_addr);
	if (pud_none(READ_ONCE(*pudp))) {
		pmdp = (void *)get_safe_page(GFP_ATOMIC);
		if (!pmdp)
			return -ENOMEM;
		pud_populate(&init_mm, pudp, pmdp);
	}

	pmdp = pmd_offset(pudp, dst_addr);
	if (pmd_none(READ_ONCE(*pmdp))) {
		ptep = (void *)get_safe_page(GFP_ATOMIC);
		if (!ptep)
			return -ENOMEM;
		pmd_populate_kernel(&init_mm, pmdp, ptep);
	}

	ptep = pte_offset_kernel(pmdp, dst_addr);
	set_pte(ptep, pfn_pte(virt_to_pfn(page), PAGE_KERNEL_EXEC));

	return 0;
}
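
/*
 * The walk above mirrors an ordinary page-table walk, except that any
 * missing level is allocated with get_safe_page(), so the new tables never
 * alias memory that the restore is about to overwrite.
 */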

/*
 * Copies length bytes, starting at src_start, into a new page,
 * performs cache maintenance, then maps it at the specified low
 * address as executable.
 *
 * This is used by hibernate to copy the code it needs to execute when
 * overwriting the kernel text. This function generates a new set of page
 * tables, which it loads into ttbr0.
 *
 * Length is provided as we probably only want 4K of data, even on a 64K
 * page system.
 */
static int create_safe_exec_page(void *src_start, size_t length,
				 unsigned long dst_addr,
				 phys_addr_t *phys_dst_addr)
{
	void *page = (void *)get_safe_page(GFP_ATOMIC);
	pgd_t *trans_pgd;
	int rc;

	if (!page)
		return -ENOMEM;

	memcpy(page, src_start, length);
	__flush_icache_range((unsigned long)page, (unsigned long)page + length);

	trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
	if (!trans_pgd)
		return -ENOMEM;

	rc = trans_pgd_map_page(trans_pgd, page, dst_addr,
				PAGE_KERNEL_EXEC);
	if (rc)
		return rc;

	/*
	 * Load our new page tables. A strict BBM approach requires that we
	 * ensure that TLBs are free of any entries that may overlap with the
	 * global mappings we are about to install.
	 *
	 * For a real hibernate/resume cycle TTBR0 currently points to a zero
	 * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI
	 * runtime services), while for a userspace-driven test_resume cycle it
	 * points to userspace page tables (and we must point it at a zero page
	 * ourselves). Elsewhere we only (un)install the idmap with preemption
	 * disabled, so T0SZ should be as required regardless.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	write_sysreg(phys_to_ttbr(virt_to_phys(trans_pgd)), ttbr0_el1);
	isb();

	*phys_dst_addr = virt_to_phys(page);

	return 0;
}

#define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))

#ifdef CONFIG_ARM64_MTE

static DEFINE_XARRAY(mte_pages);
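/*
 * mte_pages maps a pfn to the tag storage saved for that page; entries are
 * consumed and freed again by swsusp_mte_restore_tags() on the resume path.
 */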

static int save_tags(struct page *page, unsigned long pfn)
{
	void *tag_storage, *ret;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
		mte_free_tag_storage(ret);
	}

	return 0;
}

static void swsusp_mte_free_storage(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);

	xa_destroy(&mte_pages);
}

static int swsusp_mte_save_tags(void)
{
	struct zone *zone;
	unsigned long pfn, max_zone_pfn;
	int ret = 0;
	int n = 0;

	if (!system_supports_mte())
		return 0;

	for_each_populated_zone(zone) {
		max_zone_pfn = zone_end_pfn(zone);
		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			if (!test_bit(PG_mte_tagged, &page->flags))
				continue;

			ret = save_tags(page, pfn);
			if (ret) {
				swsusp_mte_free_storage();
				goto out;
			}

			n++;
		}
	}
	pr_info("Saved %d MTE pages\n", n);

out:
	return ret;
}

static void swsusp_mte_restore_tags(void)
{
	XA_STATE(xa_state, &mte_pages, 0);
	int n = 0;
	void *tags;

	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, ULONG_MAX) {
		unsigned long pfn = xa_state.xa_index;
		struct page *page = pfn_to_online_page(pfn);

		/*
		 * It is not required to invoke page_kasan_tag_reset(page)
		 * at this point since the tags stored in page->flags are
		 * already restored.
		 */
		mte_restore_page_tags(page_address(page), tags);

		mte_free_tag_storage(tags);
		n++;
	}
	xa_unlock(&mte_pages);

	pr_info("Restored %d MTE pages\n", n);

	xa_destroy(&mte_pages);
}

#else	/* CONFIG_ARM64_MTE */

static int swsusp_mte_save_tags(void)
{
	return 0;
}

static void swsusp_mte_restore_tags(void)
{
}

#endif	/* CONFIG_ARM64_MTE */

int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	flags = local_daif_save();

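	/*
	 * __cpu_suspend_enter() behaves like setjmp(): it returns true on the
	 * initial pass, when we go on to save the memory image, and false
	 * when the resume kernel restores this saved context via cpu_resume(),
	 * so the else branch below runs only after the image has been
	 * restored.
	 */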
	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		ret = swsusp_mte_save_tags();
		if (ret)
			return ret;

		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC */
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed()) {
			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);
			dcache_clean_range(__hyp_text_start, __hyp_text_end);
		}

		swsusp_mte_restore_tags();

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();

		/*
		 * Just in case the boot kernel did turn the SSBD
		 * mitigation off behind our back, let's set the state
		 * to what we expect it to be.
		 */
		spectre_v4_enable_mitigation(NULL);
	}

	local_daif_restore(flags);

	return ret;
}

static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
{
	pte_t pte = READ_ONCE(*src_ptep);

	if (pte_valid(pte)) {
		/*
		 * Resume will overwrite areas that may be marked
		 * read only (code, rodata). Clear the RDONLY bit from
		 * the temporary mappings we use during restore.
		 */
		set_pte(dst_ptep, pte_mkwrite(pte));
	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
		/*
		 * debug_pagealloc will remove the PTE_VALID bit if
		 * the page isn't in use by the resume kernel. It may have
		 * been in use by the original kernel, in which case we need
		 * to put it back in our copy to do the restore.
		 *
		 * Before marking this entry valid, check that the pfn should
		 * be mapped.
		 */
		BUG_ON(!pfn_valid(pte_pfn(pte)));

		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
	}
}

static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
		    unsigned long end)
{
	pte_t *src_ptep;
	pte_t *dst_ptep;
	unsigned long addr = start;

	dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
	if (!dst_ptep)
		return -ENOMEM;
	pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
	dst_ptep = pte_offset_kernel(dst_pmdp, start);

	src_ptep = pte_offset_kernel(src_pmdp, start);
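	/*
	 * Kernel-style walker loop: the comma expressions advance both
	 * pointers and the address; only the final "addr != end" test
	 * controls termination. The copy_* helpers below use the same
	 * pattern.
	 */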
	do {
		_copy_pte(dst_ptep, src_ptep, addr);
	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);

	return 0;
}

static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
		    unsigned long end)
{
	pmd_t *src_pmdp;
	pmd_t *dst_pmdp;
	unsigned long next;
	unsigned long addr = start;

	if (pud_none(READ_ONCE(*dst_pudp))) {
		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pmdp)
			return -ENOMEM;
		pud_populate(&init_mm, dst_pudp, dst_pmdp);
	}
	dst_pmdp = pmd_offset(dst_pudp, start);

	src_pmdp = pmd_offset(src_pudp, start);
	do {
		pmd_t pmd = READ_ONCE(*src_pmdp);

		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			continue;
		if (pmd_table(pmd)) {
			if (copy_pte(dst_pmdp, src_pmdp, addr, next))
				return -ENOMEM;
		} else {
			set_pmd(dst_pmdp,
				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
		}
	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);

	return 0;
}

static int copy_pud(p4d_t *dst_p4dp, p4d_t *src_p4dp, unsigned long start,
		    unsigned long end)
{
	pud_t *dst_pudp;
	pud_t *src_pudp;
	unsigned long next;
	unsigned long addr = start;

	if (p4d_none(READ_ONCE(*dst_p4dp))) {
		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
		if (!dst_pudp)
			return -ENOMEM;
		p4d_populate(&init_mm, dst_p4dp, dst_pudp);
	}
	dst_pudp = pud_offset(dst_p4dp, start);

	src_pudp = pud_offset(src_p4dp, start);
	do {
		pud_t pud = READ_ONCE(*src_pudp);

		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			continue;
		if (pud_table(pud)) {
			if (copy_pmd(dst_pudp, src_pudp, addr, next))
				return -ENOMEM;
		} else {
			set_pud(dst_pudp,
				__pud(pud_val(pud) & ~PUD_SECT_RDONLY));
		}
	} while (dst_pudp++, src_pudp++, addr = next, addr != end);

	return 0;
}

static int copy_p4d(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
		    unsigned long end)
{
	p4d_t *dst_p4dp;
	p4d_t *src_p4dp;
	unsigned long next;
	unsigned long addr = start;

	dst_p4dp = p4d_offset(dst_pgdp, start);
	src_p4dp = p4d_offset(src_pgdp, start);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none(READ_ONCE(*src_p4dp)))
			continue;
		if (copy_pud(dst_p4dp, src_p4dp, addr, next))
			return -ENOMEM;
	} while (dst_p4dp++, src_p4dp++, addr = next, addr != end);

	return 0;
}

static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
			    unsigned long end)
{
	unsigned long next;
	unsigned long addr = start;
	pgd_t *src_pgdp = pgd_offset_k(start);

	dst_pgdp = pgd_offset_pgd(dst_pgdp, start);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none(READ_ONCE(*src_pgdp)))
			continue;
		if (copy_p4d(dst_pgdp, src_pgdp, addr, next))
			return -ENOMEM;
	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);

	return 0;
}

static int trans_pgd_create_copy(pgd_t **dst_pgdp, unsigned long start,
			  unsigned long end)
{
	int rc;
	pgd_t *trans_pgd = (pgd_t *)get_safe_page(GFP_ATOMIC);

	if (!trans_pgd) {
		pr_err("Failed to allocate memory for temporary page tables.\n");
		return -ENOMEM;
	}

	rc = copy_page_tables(trans_pgd, start, end);
	if (!rc)
		*dst_pgdp = trans_pgd;

	return rc;
}

/*
 * Set up, then resume from the hibernate image using
 * swsusp_arch_suspend_exit().
 *
 * Memory allocated by get_safe_page() will be dealt with by the hibernate code,
 * we don't need to free it here.
 */
int swsusp_arch_resume(void)
{
	int rc;
	void *zero_page;
	size_t exit_size;
	pgd_t *tmp_pg_dir;
	phys_addr_t phys_hibernate_exit;
	void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *,
					  void *, phys_addr_t, phys_addr_t);

	/*
	 * Restoring the memory image will overwrite the ttbr1 page tables.
	 * Create a second copy of just the linear map, and use this when
	 * restoring.
	 */
	rc = trans_pgd_create_copy(&tmp_pg_dir, PAGE_OFFSET, PAGE_END);
	if (rc)
		return rc;

	/*
	 * We need a zero page that is zero before & after resume in order
	 * to break before make on the ttbr1 page tables.
	 */
	zero_page = (void *)get_safe_page(GFP_ATOMIC);
	if (!zero_page) {
		pr_err("Failed to allocate zero page.\n");
		return -ENOMEM;
	}

	/*
	 * Locate the exit code in the bottom-but-one page, so that *NULL
	 * still has disastrous effects.
	 */
	hibernate_exit = (void *)PAGE_SIZE;
	exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start;
	/*
	 * Copy swsusp_arch_suspend_exit() to a safe page. This will generate
	 * a new set of ttbr0 page tables and load them.
	 */
	rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size,
				   (unsigned long)hibernate_exit,
				   &phys_hibernate_exit);
	if (rc) {
		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
		return rc;
	}

	/*
	 * The hibernate exit text contains a set of el2 vectors, that will
	 * be executed at el2 with the mmu off in order to reload hyp-stub.
	 */
	__flush_dcache_area(hibernate_exit, exit_size);

	/*
	 * KASLR will cause the el2 vectors to be in a different location in
	 * the resumed kernel. Load hibernate's temporary copy into el2.
	 *
	 * We can skip this step if we booted at EL1, or are running with VHE.
	 */
	if (el2_reset_needed()) {
		phys_addr_t el2_vectors = phys_hibernate_exit;  /* base */
		el2_vectors += hibernate_el2_vectors -
			       __hibernate_exit_text_start;     /* offset */

		__hyp_set_vectors(el2_vectors);
	}

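	/*
	 * hibernate_exit() is __noreturn: it restores the image using the
	 * temporary page tables and re-enters the hibernated kernel via
	 * resume_hdr.reenter_kernel, so the return below is never reached.
	 */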
	hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1,
		       resume_hdr.reenter_kernel, restore_pblist,
		       resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));

	return 0;
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	if (sleep_cpu < 0) {
		pr_err("Failing to resume from hibernate on an unknown CPU.\n");
		return -ENODEV;
	}

	return freeze_secondary_cpus(sleep_cpu);
}