// SPDX-License-Identifier: GPL-2.0
/*
 * Hibernation support for x86
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */
#include <linux/gfp.h>
#include <linux/smp.h>
#include <linux/suspend.h>
#include <linux/scatterlist.h>
#include <linux/kdebug.h>
#include <linux/cpu.h>
#include <linux/pgtable.h>
#include <linux/types.h>
#include <linux/crc32.h>

#include <asm/e820/api.h>
#include <asm/init.h>
#include <asm/proto.h>
#include <asm/page.h>
#include <asm/mtrr.h>
#include <asm/sections.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>

/*
 * Address to jump to in the last phase of restore in order to get to the image
 * kernel's text (this value is passed in the image header).
 */
unsigned long restore_jump_address __visible;
unsigned long jump_address_phys;

/*
 * Value of the cr3 register from before the hibernation (this value is passed
 * in the image header).
 */
unsigned long restore_cr3 __visible;
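/* Physical address of the temporary page tables used during resume. */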
unsigned long temp_pgt __visible;
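/* Page that core_restore_code is copied to by relocate_restore_code(). */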
unsigned long relocated_restore_code __visible;

/**
 * pfn_is_nosave - check if given pfn is in the 'nosave' section
 * @pfn: the page frame number to check
 *
 * Return: 1 if the pfn falls inside the 'nosave' section, 0 otherwise.
 */
int pfn_is_nosave(unsigned long pfn)
{
	unsigned long nosave_begin_pfn;
	unsigned long nosave_end_pfn;

	nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT;
	nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT;

	return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn;
}

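/**
 * struct restore_data_record - arch-specific data in the hibernation image header
 * @jump_address:	virtual address of the restore code entry point
 * @jump_address_phys:	physical address of the restore code entry point
 * @cr3:		value to load into CR3 while the image is being restored
 * @magic:		RESTORE_MAGIC, used to validate the header format
 * @e820_checksum:	CRC32 of the firmware-provided e820 table
 */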
struct restore_data_record {
	unsigned long jump_address;
	unsigned long jump_address_phys;
	unsigned long cr3;
	unsigned long magic;
	unsigned long e820_checksum;
};

/**
 * compute_e820_crc32 - calculate crc32 of a given e820 table
 *
 * @table: the e820 table whose checksum is to be calculated
 *
 * Return: the resulting checksum
 */
static inline u32 compute_e820_crc32(struct e820_table *table)
{
	int size = offsetof(struct e820_table, entries) +
		sizeof(struct e820_entry) * table->nr_entries;

	return ~crc32_le(~0, (unsigned char const *)table, size);
}

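/*
 * Arch/word-size specific magic written into the image header; restore bails
 * out if the header does not carry the expected value (see
 * arch_hibernation_header_restore()).
 */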
#ifdef CONFIG_X86_64
#define RESTORE_MAGIC	0x23456789ABCDEF02UL
#else
#define RESTORE_MAGIC	0x12345679UL
#endif

/**
 * arch_hibernation_header_save - populate the architecture specific part
 *	of a hibernation image header
 * @addr: address to save the data at
 * @max_size: maximum size of the header data
 *
 * Return: 0 on success, -EOVERFLOW if @max_size is too small to hold the data.
 */
int arch_hibernation_header_save(void *addr, unsigned int max_size)
{
	struct restore_data_record *rdr = addr;

	if (max_size < sizeof(struct restore_data_record))
		return -EOVERFLOW;
	rdr->magic = RESTORE_MAGIC;
	rdr->jump_address = (unsigned long)restore_registers;
	rdr->jump_address_phys = __pa_symbol(restore_registers);

	/*
	 * The restore code fixes up CR3 and CR4 in the following sequence:
	 *
	 * [in hibernation asm]
	 * 1. CR3 <= temporary page tables
	 * 2. CR4 <= mmu_cr4_features (from the kernel that restores us)
	 * 3. CR3 <= rdr->cr3
	 * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel)
	 * [in restore_processor_state()]
	 * 5. CR4 <= saved CR4
	 * 6. CR3 <= saved CR3
	 *
	 * Our mmu_cr4_features has CR4.PCIDE=0, and toggling
	 * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so
	 * rdr->cr3 needs to point to valid page tables but must not
	 * have any of the PCID bits set.
	 */
	rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK;

	rdr->e820_checksum = compute_e820_crc32(e820_table_firmware);
	return 0;
}

/**
 * arch_hibernation_header_restore - read the architecture specific data
 *	from the hibernation image header
 * @addr: address to read the data from
 *
 * Return: 0 on success, -EINVAL if the header magic is not recognized,
 *	-ENODEV if the e820 memory map has changed since the image was created.
 */
int arch_hibernation_header_restore(void *addr)
{
	struct restore_data_record *rdr = addr;

	if (rdr->magic != RESTORE_MAGIC) {
		pr_crit("Unrecognized hibernate image header format!\n");
		return -EINVAL;
	}

	restore_jump_address = rdr->jump_address;
	jump_address_phys = rdr->jump_address_phys;
	restore_cr3 = rdr->cr3;

	if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) {
		pr_crit("Hibernate inconsistent memory map detected!\n");
		return -ENODEV;
	}

	return 0;
}

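/**
 * relocate_restore_code - copy the core restore code to a safe page
 *
 * Copy core_restore_code to a page allocated from the set of pages that will
 * not be overwritten while the image is being loaded, and make that page
 * executable by clearing _PAGE_NX in the mapping that covers it.
 *
 * Return: 0 on success, -ENOMEM if no safe page could be allocated.
 */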
int relocate_restore_code(void)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	relocated_restore_code = get_safe_page(GFP_ATOMIC);
	if (!relocated_restore_code)
		return -ENOMEM;

	memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE);

	/* Make the page containing the relocated code executable */
	pgd = (pgd_t *)__va(read_cr3_pa()) +
		pgd_index(relocated_restore_code);
	p4d = p4d_offset(pgd, relocated_restore_code);
	if (p4d_large(*p4d)) {
		set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX));
		goto out;
	}
	pud = pud_offset(p4d, relocated_restore_code);
	if (pud_large(*pud)) {
		set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX));
		goto out;
	}
	pmd = pmd_offset(pud, relocated_restore_code);
	if (pmd_large(*pmd)) {
		set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX));
		goto out;
	}
	pte = pte_offset_kernel(pmd, relocated_restore_code);
	set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX));
out:
	__flush_tlb_all();
	return 0;
}

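/**
 * arch_resume_nosmt - re-offline SMT siblings after resume from hibernation
 *
 * If SMT is disabled in the resumed kernel, bring the SMT siblings out of hlt
 * by onlining them, then offline them again so that they end up parked in
 * mwait (see the comment in the function body).
 *
 * Return: 0 on success or a negative error code from the CPU hotplug core.
 */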
int arch_resume_nosmt(void)
{
	int ret = 0;
	/*
	 * We reached this while coming out of hibernation. This means
	 * that SMT siblings are sleeping in hlt, as mwait is not safe
	 * against control transition during resume (see comment in
	 * hibernate_resume_nonboot_cpu_disable()).
	 *
	 * If the resumed kernel has SMT disabled, we have to take all the
	 * SMT siblings out of hlt, and offline them again so that they
	 * end up in mwait proper.
	 *
	 * Called with hotplug disabled.
	 */
	cpu_hotplug_enable();
	if (cpu_smt_control == CPU_SMT_DISABLED ||
			cpu_smt_control == CPU_SMT_FORCE_DISABLED) {
		enum cpuhp_smt_control old = cpu_smt_control;

		ret = cpuhp_smt_enable();
		if (ret)
			goto out;
		ret = cpuhp_smt_disable(old);
		if (ret)
			goto out;
	}
out:
	cpu_hotplug_disable();
	return ret;
}