Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards. The file below is arch/x86/power/cpu.c: the x86 code that saves and restores CPU register state across suspend-to-RAM and hibernation.

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Suspend support specific for i386/x86-64.
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/export.h>
#include <linux/smp.h>
#include <linux/perf_event.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/pgtable.h>

#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/suspend.h>
#include <asm/fpu/internal.h>
#include <asm/debugreg.h>
#include <asm/cpu.h>
#include <asm/mmu_context.h>
#include <asm/cpu_device_id.h>

#ifdef CONFIG_X86_32
__visible unsigned long saved_context_ebx;
__visible unsigned long saved_context_esp, saved_context_ebp;
__visible unsigned long saved_context_esi, saved_context_edi;
__visible unsigned long saved_context_eflags;
#endif
struct saved_context saved_context;

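/*
 * Read each MSR listed in ctxt->saved_msrs and record its value.
 * rdmsrl_safe() returns 0 on success, so 'valid' is set only for MSRs
 * that actually exist on this CPU; msr_restore_context() skips the rest.
 */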
static void msr_save_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		msr->valid = !rdmsrl_safe(msr->info.msr_no, &msr->info.reg.q);
		msr++;
	}
}

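/*
 * Counterpart of msr_save_context(): on resume, write back every MSR that
 * was read successfully during suspend and skip the ones marked invalid.
 */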
static void msr_restore_context(struct saved_context *ctxt)
{
	struct saved_msr *msr = ctxt->saved_msrs.array;
	struct saved_msr *end = msr + ctxt->saved_msrs.num;

	while (msr < end) {
		if (msr->valid)
			wrmsrl(msr->info.msr_no, msr->info.reg.q);
		msr++;
	}
}

/**
 *	__save_processor_state - save CPU registers before creating a
 *		hibernation image and before restoring the memory state from it
 *	@ctxt: structure to store the register contents in
 *
 *	NOTE: If there is a CPU register the modification of which by the
 *	boot kernel (i.e. the kernel used for loading the hibernation image)
 *	might affect the operations of the restored target kernel (i.e. the one
 *	saved in the hibernation image), then its contents must be saved by this
 *	function.  In other words, if kernel A is hibernated and a different
 *	kernel B is used for loading the hibernation image into memory, then
 *	kernel A's __save_processor_state() function must save all registers
 *	needed by kernel A, so that it can operate correctly after the resume
 *	regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
	store_idt(&ctxt->idt);

	/*
	 * We save it here, but restore it only in the hibernate case.
	 * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit
	 * mode in "secondary_startup_64". In 32-bit mode it is done via
	 * 'pmode_gdt' in wakeup_start.
	 */
	ctxt->gdt_desc.size = GDT_SIZE - 1;
	ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id());

	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32_LAZY_GS
	savesegment(gs, ctxt->gs);
#endif
#ifdef CONFIG_X86_64
	savesegment(gs, ctxt->gs);
	savesegment(fs, ctxt->fs);
	savesegment(ds, ctxt->ds);
	savesegment(es, ctxt->es);

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = __read_cr3();
	ctxt->cr4 = __read_cr4();
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
	msr_save_context(ctxt);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	x86_platform.save_sched_clock_state();
}
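/* APM (arch/x86/kernel/apm_32.c) can be built as a module, hence the 32-bit-only export. */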
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

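/*
 * Rebuild the per-CPU descriptor state after resume: mark the TSS descriptor
 * "available" again so that ltr will accept it, reload TR and the LDT,
 * reinitialize TLB state and the FPU, then switch back to the read-only
 * fixmap GDT.
 */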
static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
#ifdef CONFIG_X86_64
	struct desc_struct *desc = get_cpu_gdt_rw(cpu);
	tss_desc tss;
#endif

	/*
	 * We need to reload TR, which requires that we change the
	 * GDT entry to indicate "available" first.
	 *
	 * XXX: This could probably all be replaced by a call to
	 * force_reload_TR().
	 */
	set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss);

#ifdef CONFIG_X86_64
	memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
	tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91) */
	write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);

	syscall_init();				/* This sets MSR_*STAR and related */
#else
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif
	load_TR_desc();				/* This does ltr */
	load_mm_ldt(current->active_mm);	/* This does lldt */
	initialize_tlbstate_and_flush();

	fpu__resume_cpu();

	/* The processor is back on the direct GDT, load back the fixmap */
	load_fixmap_gdt(cpu);
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *                             by __save_processor_state()
 * @ctxt: structure to load the register contents from
 *
 * The asm code that gets us here will have restored a usable GDT, although
 * it will be pointing to the wrong alias.
 */
static void notrace __restore_processor_state(struct saved_context *ctxt)
{
	struct cpuinfo_x86 *c;

	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		__write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	__write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/* Restore the IDT. */
	load_idt(&ctxt->idt);

	/*
	 * Just in case the asm code got us here with the SS, DS, or ES
	 * out of sync with the GDT, update them.
	 */
	loadsegment(ss, __KERNEL_DS);
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);

	/*
	 * Restore percpu access.  Percpu access can happen in exception
	 * handlers or in complicated helpers like load_gs_index().
	 */
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif

	/* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */
	fix_processor_context();

	/*
	 * Now that we have descriptor tables fully restored and working
	 * exception handling, restore the usermode segments.
	 */
#ifdef CONFIG_X86_64
	loadsegment(ds, ctxt->es);
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	load_gs_index(ctxt->gs);

	/*
	 * Restore FSBASE and GSBASE after restoring the selectors, since
	 * restoring the selectors clobbers the bases.  Keep in mind
	 * that MSR_KERNEL_GS_BASE is horribly misnamed.
	 */
	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base);
#elif defined(CONFIG_X86_32_LAZY_GS)
	loadsegment(gs, ctxt->gs);
#endif

	do_fpu_end();
	tsc_verify_tsc_adjust(true);
	x86_platform.restore_sched_clock_state();
	mtrr_bp_restore();
	perf_restore_debug_store();
	msr_restore_context(ctxt);

	c = &cpu_data(smp_processor_id());
	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
		init_ia32_feat_ctl(c);
}

/* Needed by apm.c */
void notrace restore_processor_state(void)
{
#ifdef __clang__
	// The following code snippet is copied from __restore_processor_state.
	// Its purpose is to prepare the GS segment before that function is
	// called: since the function is compiled with SCS on, it uses GS at
	// its entry.
	// TODO: Hack to be removed later when the compiler bug is fixed.
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, saved_context.kernelmode_gs_base);
#else
	loadsegment(fs, __KERNEL_PERCPU);
	loadsegment(gs, __KERNEL_STACK_CANARY);
#endif
#endif
	__restore_processor_state(&saved_context);
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif

#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU)
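/*
 * Park a CPU with plain HLT rather than MONITOR/MWAIT; see the comment in
 * hibernate_resume_nonboot_cpu_disable() below for why MWAIT must be avoided
 * while the image is being restored.
 */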
static void resume_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);
	hlt_play_dead();
}

int hibernate_resume_nonboot_cpu_disable(void)
{
	void (*play_dead)(void) = smp_ops.play_dead;
	int ret;

	/*
	 * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop
	 * during hibernate image restoration, because it is likely that the
	 * monitored address will be actually written to at that time and then
	 * the "dead" CPU will attempt to execute instructions again, but the
	 * address in its instruction pointer may not be possible to resolve
	 * any more at that point (the page tables used by it previously may
	 * have been overwritten by hibernate image data).
	 *
	 * First, make sure that we wake up all the potentially disabled SMT
	 * threads which have been initially brought up and then put into
	 * mwait/cpuidle sleep.
	 * Those will be put to proper (not interfering with hibernation
	 * resume) sleep afterwards, and the resumed kernel will decide itself
	 * what to do with them.
	 */
	ret = cpuhp_smt_enable();
	if (ret)
		return ret;
	smp_ops.play_dead = resume_play_dead;
	ret = freeze_secondary_cpus(0);
	smp_ops.play_dead = play_dead;
	return ret;
}
#endif

/*
 * When bsp_check() is called in hibernate and suspend, cpu hotplug
 * is disabled already, so it's unnecessary to handle a race between
 * the cpumask query and cpu hotplug.
 */
static int bsp_check(void)
{
	if (cpumask_first(cpu_online_mask) != 0) {
		pr_warn("CPU0 is offline.\n");
		return -ENODEV;
	}

	return 0;
}

static int bsp_pm_callback(struct notifier_block *nb, unsigned long action,
			   void *ptr)
{
	int ret = 0;

	switch (action) {
	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		ret = bsp_check();
		break;
#ifdef CONFIG_DEBUG_HOTPLUG_CPU0
	case PM_RESTORE_PREPARE:
		/*
		 * When the system resumes from hibernation, online CPU0
		 * because
		 * 1. it's required for resume and
		 * 2. the CPU was online before hibernation
		 */
		if (!cpu_online(0))
			_debug_hotplug_cpu(0, 1);
		break;
	case PM_POST_RESTORE:
		/*
		 * When a resume really happens, this code won't be called.
		 *
		 * It is called only when user-space hibernation software
		 * prepares the snapshot device during boot time, so we just
		 * call _debug_hotplug_cpu() to restore CPU0's state from
		 * before the snapshot device was prepared.
		 *
		 * This works for the normal boot case in our CPU0 hotplug
		 * debug mode, i.e. CPU0 is offline and user-mode hibernation
		 * software initializes during boot time.
		 *
		 * If CPU0 is online and a user application accesses the
		 * snapshot device after boot time, this will offline CPU0 and
		 * the user may see a different CPU0 state before and after
		 * accessing the snapshot device.  But hopefully that is not
		 * the case when someone is debugging CPU0 hotplug, and even
		 * if it is hit, CPU0 can easily be brought back online.
		 *
		 * To keep this debug code simple, we only consider the normal
		 * boot case; otherwise we would need to remember CPU0's
		 * state, restore it, resolve races, etc.
		 */
		_debug_hotplug_cpu(0, 0);
		break;
#endif
	default:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init bsp_pm_check_init(void)
{
	/*
	 * Register bsp_pm_callback with a lower priority than
	 * cpu_hotplug_pm_callback, so that cpu_hotplug_pm_callback runs
	 * first and disables cpu hotplug before the BSP online check.
	 */
	pm_notifier(bsp_pm_callback, -INT_MAX);
	return 0;
}

core_initcall(bsp_pm_check_init);

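/*
 * Append 'num' MSR ids to the global saved_context.saved_msrs list.  The
 * existing array is copied into a freshly allocated one, so multiple quirk
 * callbacks can each add their own MSRs; new entries start out invalid until
 * msr_save_context() reads them on the next suspend.
 */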
static int msr_build_context(const u32 *msr_id, const int num)
{
	struct saved_msrs *saved_msrs = &saved_context.saved_msrs;
	struct saved_msr *msr_array;
	int total_num;
	int i, j;

	total_num = saved_msrs->num + num;

	msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL);
	if (!msr_array) {
		pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n");
		return -ENOMEM;
	}

	if (saved_msrs->array) {
		/*
		 * Multiple callbacks can invoke this function, so copy any
		 * MSR save requests from previous invocations.
		 */
		memcpy(msr_array, saved_msrs->array,
		       sizeof(struct saved_msr) * saved_msrs->num);

		kfree(saved_msrs->array);
	}

	for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) {
		msr_array[i].info.msr_no	= msr_id[j];
		msr_array[i].valid		= false;
		msr_array[i].info.reg.q		= 0;
	}
	saved_msrs->num   = total_num;
	saved_msrs->array = msr_array;

	return 0;
}

/*
 * The following sections are a quirk framework for problematic BIOSen:
 * sometimes MSRs are modified by the BIOSen after suspend to RAM, and this
 * might cause unexpected behavior after wakeup.
 * Thus we save/restore these specified MSRs across suspend/resume
 * in order to work around it.
 *
 * For any further problematic BIOSen/platforms,
 * please add your own function similar to msr_initialize_bdw.
 */
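/*
 * A new quirk would follow the same shape (a sketch only; the function name,
 * MSR list, and log text below are made-up placeholders):
 *
 *	static int msr_initialize_foo(const struct dmi_system_id *d)
 *	{
 *		u32 foo_msr_id[] = { MSR_IA32_THERM_CONTROL };
 *
 *		pr_info("x86/pm: %s detected, MSR saving is needed during suspend.\n",
 *			d->ident);
 *		return msr_build_context(foo_msr_id, ARRAY_SIZE(foo_msr_id));
 *	}
 *
 * together with an msr_save_dmi_table entry whose .callback points at it.
 */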
static int msr_initialize_bdw(const struct dmi_system_id *d)
{
	/* Add any extra MSR ids into this array. */
	u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL };

	pr_info("x86/pm: %s detected, MSR saving is needed during suspend.\n", d->ident);
	return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id));
}

static const struct dmi_system_id msr_save_dmi_table[] = {
	{
	 .callback = msr_initialize_bdw,
	 .ident = "BROADWELL BDX_EP",
	 .matches = {
		DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"),
		DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"),
		},
	},
	{}
};

static int msr_save_cpuid_features(const struct x86_cpu_id *c)
{
	u32 cpuid_msr_id[] = {
		MSR_AMD64_CPUID_FN_1,
	};

	pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspend.\n",
		c->family);

	return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id));
}

static const struct x86_cpu_id msr_save_cpu_table[] = {
	X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features),
	X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features),
	{}
};

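/*
 * x86_match_cpu() hands back the matching table entry; its ->driver_data
 * field carries the per-model callback, which is cast back to a function
 * pointer and invoked here.
 */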
typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *);
static int pm_cpu_check(const struct x86_cpu_id *c)
{
	const struct x86_cpu_id *m;
	int ret = 0;

	m = x86_match_cpu(msr_save_cpu_table);
	if (m) {
		pm_cpu_match_t fn;

		fn = (pm_cpu_match_t)m->driver_data;
		ret = fn(m);
	}

	return ret;
}

static int pm_check_save_msr(void)
{
	dmi_check_system(msr_save_dmi_table);
	pm_cpu_check(msr_save_cpu_table);

	return 0;
}

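/* Evaluate both quirk tables once, at boot. */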
device_initcall(pm_check_save_msr);