Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

File: arch/x86/kernel/sev-es.c (SEV-ES #VC exception handling; all lines last touched in commit 8f3ce5b39 by kx, 2023-10-28)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2019 SUSE
 *
 * Author: Joerg Roedel <jroedel@suse.de>
 */

#define pr_fmt(fmt)	"SEV-ES: " fmt

#include <linux/sched/debug.h>	/* For show_regs() */
#include <linux/percpu-defs.h>
#include <linux/mem_encrypt.h>
#include <linux/printk.h>
#include <linux/mm_types.h>
#include <linux/set_memory.h>
#include <linux/memblock.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpu_entry_area.h>
#include <asm/stacktrace.h>
#include <asm/sev-es.h>
#include <asm/insn-eval.h>
#include <asm/fpu/internal.h>
#include <asm/processor.h>
#include <asm/realmode.h>
#include <asm/traps.h>
#include <asm/svm.h>
#include <asm/smp.h>
#include <asm/cpu.h>

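/* Architectural reset value of DR7: all debug extensions disabled, bit 10 always set */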
#define DR7_RESET_VALUE        0x400

/* For early boot hypervisor communication in SEV-ES enabled guests */
static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);

/*
 * Needs to be in the .data section because we need it NULL before bss is
 * cleared
 */
static struct ghcb __initdata *boot_ghcb;

/* #VC handler runtime per-CPU data */
struct sev_es_runtime_data {
	struct ghcb ghcb_page;

	/*
	 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
	 * It is needed when an NMI happens while the #VC handler uses the real
	 * GHCB, and the NMI handler itself is causing another #VC exception. In
	 * that case the GHCB content of the first handler needs to be backed up
	 * and restored.
	 */
	struct ghcb backup_ghcb;

	/*
	 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
	 * There is no need for it to be atomic, because nothing is written to
	 * the GHCB between the read and the write of ghcb_active. So it is safe
	 * to use it when a nested #VC exception happens before the write.
	 *
	 * This is necessary for example in the #VC->NMI->#VC case when the NMI
	 * happens while the first #VC handler uses the GHCB. When the NMI code
	 * raises a second #VC handler it might overwrite the contents of the
	 * GHCB written by the first handler. To avoid this the content of the
	 * GHCB is saved and restored when the GHCB is detected to be in use
	 * already.
	 */
	bool ghcb_active;
	bool backup_ghcb_active;

	/*
	 * Cached DR7 value - write it on DR7 writes and return it on reads.
	 * That value will never make it to the real hardware DR7 as debugging
	 * is currently unsupported in SEV-ES guests.
	 */
	unsigned long dr7;
};

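/*
 * State carried between __sev_get_ghcb() and __sev_put_ghcb(): ->ghcb points
 * to the backup GHCB when the per-CPU GHCB was already in use, NULL otherwise.
 */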
struct ghcb_state {
	struct ghcb *ghcb;
};

static DEFINE_PER_CPU(struct sev_es_runtime_data *, runtime_data);
DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);

/* Needed in vc_early_forward_exception */
void do_early_exception(struct pt_regs *regs, int trapnr);

static __always_inline bool on_vc_stack(struct pt_regs *regs)
{
	unsigned long sp = regs->sp;

	/* User-mode RSP is not trusted */
	if (user_mode(regs))
		return false;

	/* SYSCALL gap still has user-mode RSP */
	if (ip_within_syscall_gap(regs))
		return false;

	return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
}

/*
 * This function handles the case when an NMI is raised in the #VC exception
 * handler entry code. In this case, the IST entry for #VC must be adjusted, so
 * that any subsequent #VC exception will not overwrite the stack contents of
 * the interrupted #VC handler.
 *
 * The IST entry is adjusted unconditionally so that it can also be
 * unconditionally adjusted back in sev_es_ist_exit(). Otherwise a nested
 * sev_es_ist_exit() call may adjust back the IST entry too early.
 */
void noinstr __sev_es_ist_enter(struct pt_regs *regs)
{
	unsigned long old_ist, new_ist;

	/* Read old IST entry */
	old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	/* Make room on the IST stack */
	if (on_vc_stack(regs))
		new_ist = ALIGN_DOWN(regs->sp, 8) - sizeof(old_ist);
	else
		new_ist = old_ist - sizeof(old_ist);

	/* Store old IST entry */
	*(unsigned long *)new_ist = old_ist;

	/* Set new IST entry */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
}

void noinstr __sev_es_ist_exit(void)
{
	unsigned long ist;

	/* Read IST entry */
	ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);

	if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
		return;

	/* Read back old IST entry and write it to the TSS */
	this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
}

/*
 * Nothing shall interrupt this code path while holding the per-CPU
 * GHCB. The backup GHCB is only for NMIs interrupting this path.
 *
 * Callers must disable local interrupts around it.
 */
static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (unlikely(data->ghcb_active)) {
		/* GHCB is already in use - save its contents */

		if (unlikely(data->backup_ghcb_active)) {
			/*
			 * Backup-GHCB is also already in use. There is no way
			 * to continue here so just kill the machine. To make
			 * panic() work, mark GHCBs inactive so that messages
			 * can be printed out.
			 */
			data->ghcb_active        = false;
			data->backup_ghcb_active = false;

			instrumentation_begin();
			panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
			instrumentation_end();
		}

		/* Mark backup_ghcb active before writing to it */
		data->backup_ghcb_active = true;

		state->ghcb = &data->backup_ghcb;

		/* Backup GHCB content */
		*state->ghcb = *ghcb;
	} else {
		state->ghcb = NULL;
		data->ghcb_active = true;
	}

	return ghcb;
}
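/*
 * Accessors for the GHCB MSR (MSR_AMD64_SEV_ES_GHCB), which communicates the
 * guest-physical address of the GHCB to the hypervisor.
 */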
static inline u64 sev_es_rd_ghcb_msr(void)
{
	return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
}

static __always_inline void sev_es_wr_ghcb_msr(u64 val)
{
	u32 low, high;

	low  = (u32)(val);
	high = (u32)(val >> 32);

	native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
}

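/* Fetch up to MAX_INSN_SIZE instruction bytes from kernel memory without faulting */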
static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
				unsigned char *buffer)
{
	return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
}

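/*
 * Decode the instruction that caused the #VC exception into ctxt->insn.
 * User-mode bytes are fetched with the inatomic helper because this runs
 * in atomic context and must not sleep on a page fault.
 */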
static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
{
	char buffer[MAX_INSN_SIZE];
	enum es_result ret;
	int res;

	if (user_mode(ctxt->regs)) {
		res = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
		if (!res) {
			ctxt->fi.vector     = X86_TRAP_PF;
			ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
			ctxt->fi.cr2        = ctxt->regs->ip;
			return ES_EXCEPTION;
		}

		if (!insn_decode(&ctxt->insn, ctxt->regs, buffer, res))
			return ES_DECODE_FAILED;
	} else {
		res = vc_fetch_insn_kernel(ctxt, buffer);
		if (res) {
			ctxt->fi.vector     = X86_TRAP_PF;
			ctxt->fi.error_code = X86_PF_INSTR;
			ctxt->fi.cr2        = ctxt->regs->ip;
			return ES_EXCEPTION;
		}

		insn_init(&ctxt->insn, buffer, MAX_INSN_SIZE - res, 1);
		insn_get_length(&ctxt->insn);
	}

	ret = ctxt->insn.immediate.got ? ES_OK : ES_DECODE_FAILED;

	return ret;
}

static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
				   char *dst, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;

	/*
	 * This function uses __put_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __put_user() does no
	 * sanity checks of the pointer being accessed. All it does is report
	 * when the access failed.
	 *
	 * Also, this function runs in atomic context, so __put_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_to_user() here because
	 * vc_write_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whichever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *target = (u8 __user *)dst;

		memcpy(&d1, buf, 1);
		if (__put_user(d1, target))
			goto fault;
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *target = (u16 __user *)dst;

		memcpy(&d2, buf, 2);
		if (__put_user(d2, target))
			goto fault;
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *target = (u32 __user *)dst;

		memcpy(&d4, buf, 4);
		if (__put_user(d4, target))
			goto fault;
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *target = (u64 __user *)dst;

		memcpy(&d8, buf, 8);
		if (__put_user(d8, target))
			goto fault;
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)dst;

	return ES_EXCEPTION;
}

static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
				  char *src, char *buf, size_t size)
{
	unsigned long error_code = X86_PF_PROT;

	/*
	 * This function uses __get_user() independent of whether kernel or user
	 * memory is accessed. This works fine because __get_user() does no
	 * sanity checks of the pointer being accessed. All it does is report
	 * when the access failed.
	 *
	 * Also, this function runs in atomic context, so __get_user() is not
	 * allowed to sleep. The page-fault handler detects that it is running
	 * in atomic context and will not try to take mmap_sem and handle the
	 * fault, so additional pagefault_enable()/disable() calls are not
	 * needed.
	 *
	 * The access can't be done via copy_from_user() here because
	 * vc_read_mem() must not use string instructions to access unsafe
	 * memory. The reason is that MOVS is emulated by the #VC handler by
	 * splitting the move up into a read and a write and taking a nested #VC
	 * exception on whichever of them is the MMIO access. Using string
	 * instructions here would cause infinite nesting.
	 */
	switch (size) {
	case 1: {
		u8 d1;
		u8 __user *s = (u8 __user *)src;

		if (__get_user(d1, s))
			goto fault;
		memcpy(buf, &d1, 1);
		break;
	}
	case 2: {
		u16 d2;
		u16 __user *s = (u16 __user *)src;

		if (__get_user(d2, s))
			goto fault;
		memcpy(buf, &d2, 2);
		break;
	}
	case 4: {
		u32 d4;
		u32 __user *s = (u32 __user *)src;

		if (__get_user(d4, s))
			goto fault;
		memcpy(buf, &d4, 4);
		break;
	}
	case 8: {
		u64 d8;
		u64 __user *s = (u64 __user *)src;

		if (__get_user(d8, s))
			goto fault;
		memcpy(buf, &d8, 8);
		break;
	}
	default:
		WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
		return ES_UNSUPPORTED;
	}

	return ES_OK;

fault:
	if (user_mode(ctxt->regs))
		error_code |= X86_PF_USER;

	ctxt->fi.vector = X86_TRAP_PF;
	ctxt->fi.error_code = error_code;
	ctxt->fi.cr2 = (unsigned long)src;

	return ES_EXCEPTION;
}

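/*
 * Walk the page tables to translate vaddr to a physical address. Unlike
 * __pa()/virt_to_phys() this also works for addresses outside the kernel
 * direct map (e.g. ioremap()ed MMIO), and it rejects encrypted mappings,
 * for which emulated MMIO is not supported.
 */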
static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
					   unsigned long vaddr, phys_addr_t *paddr)
{
	unsigned long va = (unsigned long)vaddr;
	unsigned int level;
	phys_addr_t pa;
	pgd_t *pgd;
	pte_t *pte;

	pgd = __va(read_cr3_pa());
	pgd = &pgd[pgd_index(va)];
	pte = lookup_address_in_pgd(pgd, va, &level);
	if (!pte) {
		ctxt->fi.vector     = X86_TRAP_PF;
		ctxt->fi.cr2        = vaddr;
		ctxt->fi.error_code = 0;

		if (user_mode(ctxt->regs))
			ctxt->fi.error_code |= X86_PF_USER;

		return ES_EXCEPTION;
	}

	if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
		/* Emulated MMIO to/from encrypted memory not supported */
		return ES_UNSUPPORTED;

	pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
	pa |= va & ~page_level_mask(level);

	*paddr = pa;

	return ES_OK;
}

/* Include code shared with pre-decompression boot stage */
#include "sev-es-shared.c"

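/*
 * Release a GHCB acquired with __sev_get_ghcb(): either restore the saved
 * contents from the backup GHCB, or invalidate the page and mark it free.
 */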
static noinstr void __sev_put_ghcb(struct ghcb_state *state)
{
	struct sev_es_runtime_data *data;
	struct ghcb *ghcb;

	WARN_ON(!irqs_disabled());

	data = this_cpu_read(runtime_data);
	ghcb = &data->ghcb_page;

	if (state->ghcb) {
		/* Restore GHCB from Backup */
		*ghcb = *state->ghcb;
		data->backup_ghcb_active = false;
		state->ghcb = NULL;
	} else {
		/*
		 * Invalidate the GHCB so a VMGEXIT instruction issued
		 * from userspace won't appear to be valid.
		 */
		vc_ghcb_invalidate(ghcb);
		data->ghcb_active = false;
	}
}

void noinstr __sev_es_nmi_complete(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
	VMGEXIT();

	__sev_put_ghcb(&state);
}

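/* Query the physical address of the AP jump table from the hypervisor */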
static u64 get_jump_table_addr(void)
{
	struct ghcb_state state;
	unsigned long flags;
	struct ghcb *ghcb;
	u64 ret = 0;

	local_irq_save(flags);

	ghcb = __sev_get_ghcb(&state);

	vc_ghcb_invalidate(ghcb);
	ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	sev_es_wr_ghcb_msr(__pa(ghcb));
	VMGEXIT();

	if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
	    ghcb_sw_exit_info_2_is_valid(ghcb))
		ret = ghcb->save.sw_exit_info_2;

	__sev_put_ghcb(&state);

	local_irq_restore(flags);

	return ret;
}

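/*
 * Write the real-mode trampoline entry point (IP and CS) into the AP jump
 * table, so that parked APs resume there when the hypervisor wakes them up.
 */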
int sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
{
	u16 startup_cs, startup_ip;
	phys_addr_t jump_table_pa;
	u64 jump_table_addr;
	u16 __iomem *jump_table;

	jump_table_addr = get_jump_table_addr();

	/* On UP guests there is no jump table so this is not a failure */
	if (!jump_table_addr)
		return 0;

	/* Check if AP Jump Table is page-aligned */
	if (jump_table_addr & ~PAGE_MASK)
		return -EINVAL;

	jump_table_pa = jump_table_addr & PAGE_MASK;

	startup_cs = (u16)(rmh->trampoline_start >> 4);
	startup_ip = (u16)(rmh->sev_es_trampoline_start -
			   rmh->trampoline_start);

	jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
	if (!jump_table)
		return -EIO;

	writew(startup_ip, &jump_table[0]);
	writew(startup_cs, &jump_table[1]);

	iounmap(jump_table);

	return 0;
}

/*
 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
 */
int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
{
	struct sev_es_runtime_data *data;
	unsigned long address, pflags;
	int cpu;
	u64 pfn;

	if (!sev_es_active())
		return 0;

	pflags = _PAGE_NX | _PAGE_RW;

	for_each_possible_cpu(cpu) {
		data = per_cpu(runtime_data, cpu);

		address = __pa(&data->ghcb_page);
		pfn = address >> PAGE_SHIFT;

		if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
			return 1;
	}

	return 0;
}

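/*
 * Emulate RDMSR/WRMSR: opcode 0x0f 0x30 is WRMSR and 0x0f 0x32 is RDMSR, so
 * exit_info_1 tells the hypervisor whether this is a write (1) or a read (0).
 */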
static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
{
	struct pt_regs *regs = ctxt->regs;
	enum es_result ret;
	u64 exit_info_1;

	/* Is it a WRMSR? */
	exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;

	ghcb_set_rcx(ghcb, regs->cx);
	if (exit_info_1) {
		ghcb_set_rax(ghcb, regs->ax);
		ghcb_set_rdx(ghcb, regs->dx);
	}

	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);

	if ((ret == ES_OK) && (!exit_info_1)) {
		regs->ax = ghcb->save.rax;
		regs->dx = ghcb->save.rdx;
	}

	return ret;
}

/*
 * This function runs on the first #VC exception after the kernel
 * switched to virtual addresses.
 */
static bool __init sev_es_setup_ghcb(void)
{
	/* First make sure the hypervisor talks a supported protocol. */
	if (!sev_es_negotiate_protocol())
		return false;

	/*
	 * Clear the boot_ghcb. The first exception comes in before the bss
	 * section is cleared.
	 */
	memset(&boot_ghcb_page, 0, PAGE_SIZE);

	/* Alright - Make the boot-ghcb public */
	boot_ghcb = &boot_ghcb_page;

	return true;
}

#ifdef CONFIG_HOTPLUG_CPU
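/*
 * Park the CPU in a hypervisor-assisted HLT loop; the loop only exits when
 * the hypervisor signals a wakeup through sw_exit_info_2 in the GHCB.
 */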
static void sev_es_ap_hlt_loop(void)
{
	struct ghcb_state state;
	struct ghcb *ghcb;

	ghcb = __sev_get_ghcb(&state);

	while (true) {
		vc_ghcb_invalidate(ghcb);
		ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
		ghcb_set_sw_exit_info_1(ghcb, 0);
		ghcb_set_sw_exit_info_2(ghcb, 0);

		sev_es_wr_ghcb_msr(__pa(ghcb));
		VMGEXIT();

		/* Wakeup signal? */
		if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
		    ghcb->save.sw_exit_info_2)
			break;
	}

	__sev_put_ghcb(&state);
}

/*
 * Play_dead handler when running under SEV-ES. This is needed because
 * the hypervisor can't deliver an SIPI request to restart the AP.
 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
 * hypervisor wakes it up again.
 */
static void sev_es_play_dead(void)
{
	play_dead_common();

	/* IRQs now disabled */

	sev_es_ap_hlt_loop();

	/*
	 * If we get here, the VCPU was woken up again. Jump to CPU
	 * startup code to get it back online.
	 */
	start_cpu0();
}
#else  /* CONFIG_HOTPLUG_CPU */
#define sev_es_play_dead	native_play_dead
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_SMP
static void __init sev_es_setup_play_dead(void)
{
	smp_ops.play_dead = sev_es_play_dead;
}
#else
static inline void sev_es_setup_play_dead(void) { }
#endif

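/*
 * Allocate the per-CPU runtime data page-aligned, so that the GHCB page at
 * its start can later be mapped decrypted with page granularity.
 */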
static void __init alloc_runtime_data(int cpu)
{
	struct sev_es_runtime_data *data;

	data = memblock_alloc(sizeof(*data), PAGE_SIZE);
	if (!data)
		panic("Can't allocate SEV-ES runtime data");

	per_cpu(runtime_data, cpu) = data;
}

static void __init init_ghcb(int cpu)
{
	struct sev_es_runtime_data *data;
	int err;

	data = per_cpu(runtime_data, cpu);

	err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
					 sizeof(data->ghcb_page));
	if (err)
		panic("Can't map GHCBs unencrypted");

	memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));

	data->ghcb_active = false;
	data->backup_ghcb_active = false;
}

void __init sev_es_init_vc_handling(void)
{
	int cpu;

	BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);

	if (!sev_es_active())
		return;

	if (!sev_es_check_cpu_features())
		panic("SEV-ES CPU Features missing");

	/* Enable SEV-ES special handling */
	static_branch_enable(&sev_es_enable_key);

	/* Initialize per-cpu GHCB pages */
	for_each_possible_cpu(cpu) {
		alloc_runtime_data(cpu);
		init_ghcb(cpu);
	}

	sev_es_setup_play_dead();

	/* Secondary CPUs use the runtime #VC handler */
	initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
}

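/*
 * Forward an exception hit during early instruction emulation to the early
 * exception handler, mimicking hardware by writing CR2 for page faults and
 * passing the error code in orig_ax.
 */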
static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
{
	int trapnr = ctxt->fi.vector;

	if (trapnr == X86_TRAP_PF)
		native_write_cr2(ctxt->fi.cr2);

	ctxt->regs->orig_ax = ctxt->fi.error_code;
	do_early_exception(ctxt->regs, trapnr);
}

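/*
 * Resolve the ModRM reg/rm operand of the decoded instruction to a pointer
 * into the saved register array in struct pt_regs.
 */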
static long *vc_insn_get_reg(struct es_em_ctxt *ctxt)
{
	long *reg_array;
	int offset;

	reg_array = (long *)ctxt->regs;
	offset    = insn_get_modrm_reg_off(&ctxt->insn, ctxt->regs);

	if (offset < 0)
		return NULL;

	offset /= sizeof(long);

	return reg_array + offset;
}

static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
{
	long *reg_array;
	int offset;

	reg_array = (long *)ctxt->regs;
	offset    = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);

	if (offset < 0)
		return NULL;

	offset /= sizeof(long);

	return reg_array + offset;
}
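
/*
 * Perform one emulated MMIO read or write of up to 8 bytes, bouncing the
 * data through the GHCB shared buffer (the sw_scratch area).
 */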
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 				 unsigned int bytes, bool read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 	u64 exit_code, exit_info_1, exit_info_2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	unsigned long ghcb_pa = __pa(ghcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	enum es_result res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	phys_addr_t paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	void __user *ref;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	if (ref == (void __user *)-1L)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 		return ES_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (res != ES_OK) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		if (res == ES_EXCEPTION && !read)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 			ctxt->fi.error_code |= X86_PF_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 		return res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 	exit_info_1 = paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 	/* Can never be greater than 8 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 	exit_info_2 = bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 	ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 	return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) }
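
/*
 * Sketch of how callers drive vc_do_mmio() for an MMIO write (the value
 * is illustrative): data is bounced through the unencrypted
 * ghcb->shared_buffer, whose location the hypervisor learns from the
 * sw_scratch field set above, while exit_info_1 carries the physical
 * MMIO address and exit_info_2 the access size.
 *
 *	u32 value = 0x12345678;
 *	memcpy(ghcb->shared_buffer, &value, sizeof(value));
 *	ret = vc_do_mmio(ghcb, ctxt, sizeof(value), false);
 *	// ES_OK means the hypervisor performed the 4-byte write
 */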
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) static enum es_result vc_handle_mmio_twobyte_ops(struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 						 struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	struct insn *insn = &ctxt->insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 	unsigned int bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 	enum es_result ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	int sign_byte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	long *reg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 	switch (insn->opcode.bytes[1]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 		/* MMIO Read w/ zero-extension */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 	case 0xb6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 	case 0xb7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 		if (!bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 			bytes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 		/* Zero extend based on operand size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 		reg_data = vc_insn_get_reg(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 		if (!reg_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 			return ES_DECODE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		memset(reg_data, 0, insn->opnd_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		memcpy(reg_data, ghcb->shared_buffer, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		/* MMIO Read w/ sign-extension */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 	case 0xbe:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 		bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 	case 0xbf:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 		if (!bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 			bytes = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 		/* Sign extend based on operand size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		reg_data = vc_insn_get_reg(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 		if (!reg_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 			return ES_DECODE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		if (bytes == 1) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 			u8 *val = (u8 *)ghcb->shared_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 			sign_byte = (*val & 0x80) ? 0xff : 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 			u16 *val = (u16 *)ghcb->shared_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 			sign_byte = (*val & 0x8000) ? 0xff : 0x00;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 		memset(reg_data, sign_byte, insn->opnd_bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 		memcpy(reg_data, ghcb->shared_buffer, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 		ret = ES_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) }
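
/*
 * The sign extension above works byte-wise: the destination register is
 * first filled with 0x00 or 0xff depending on the source's sign bit,
 * then the low bytes are overwritten. Worked example for an assumed
 * one-byte source on little-endian x86:
 *
 *	u8 src = 0x90;			// sign bit set
 *	sign_byte = 0xff;
 *	memset(reg_data, 0xff, 4);	// insn->opnd_bytes == 4
 *	memcpy(reg_data, &src, 1);	// register now holds 0xffffff90
 *
 * which matches what the MOVSX instruction would produce in hardware.
 */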
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * The MOVS instruction has two memory operands, which raises the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * problem that it is not known whether the access to the source or the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  * destination caused the #VC exception (and hence whether an MMIO read
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  * or write operation needs to be emulated).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904)  * Instead of playing games with walking page-tables and trying to guess
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905)  * whether the source or destination is an MMIO range, split the move
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906)  * into two operations, a read and a write with only one memory operand.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907)  * This will cause a nested #VC exception on the MMIO address which can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908)  * then be handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * This implementation has the benefit that it also supports MOVS where
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * source _and_ destination are MMIO regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914)  * rare operation. If it turns out to be a performance problem, the split

^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915)  * operations can be moved to memcpy_fromio() and memcpy_toio().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 					  unsigned int bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 	unsigned long ds_base, es_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 	unsigned char *src, *dst;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	unsigned char buffer[8];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 	enum es_result ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	bool rep;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 	int off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 	ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (ds_base == -1L || es_base == -1L) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		ctxt->fi.vector = X86_TRAP_GP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		ctxt->fi.error_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 		return ES_EXCEPTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 	src = ds_base + (unsigned char *)ctxt->regs->si;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 	dst = es_base + (unsigned char *)ctxt->regs->di;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 	ret = vc_read_mem(ctxt, src, buffer, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940) 	if (ret != ES_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	ret = vc_write_mem(ctxt, dst, buffer, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	if (ret != ES_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	if (ctxt->regs->flags & X86_EFLAGS_DF)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 		off = -bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 		off =  bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952) 	ctxt->regs->si += off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	ctxt->regs->di += off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	rep = insn_has_rep_prefix(&ctxt->insn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	if (rep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		ctxt->regs->cx -= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 	if (!rep || ctxt->regs->cx == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 		return ES_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 		return ES_RETRY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) }
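
/*
 * Note on the ES_RETRY return above: the handler does not loop here.
 * Returning ES_RETRY makes the caller skip vc_finish_insn(), so RIP
 * still points at the MOVS instruction and it immediately faults again.
 * The effective control flow is therefore (schematically):
 *
 *	while (regs->cx) {
 *		// one #VC per element: copy, advance si/di, cx--
 *	}
 *	// only after the last element is RIP advanced past MOVS
 */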
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) static enum es_result vc_handle_mmio(struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 				     struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 	struct insn *insn = &ctxt->insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	unsigned int bytes = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	enum es_result ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 	long *reg_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	switch (insn->opcode.bytes[0]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	/* MMIO Write */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	case 0x88:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	case 0x89:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 		if (!bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 			bytes = insn->opnd_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		reg_data = vc_insn_get_reg(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		if (!reg_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 			return ES_DECODE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		memcpy(ghcb->shared_buffer, reg_data, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 	case 0xc6:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 	case 0xc7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 		if (!bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 			bytes = insn->opnd_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) 		memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) 		ret = vc_do_mmio(ghcb, ctxt, bytes, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 		/* MMIO Read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) 	case 0x8a:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) 		bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) 	case 0x8b:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) 		if (!bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) 			bytes = insn->opnd_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) 		ret = vc_do_mmio(ghcb, ctxt, bytes, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) 		if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) 		reg_data = vc_insn_get_reg(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		if (!reg_data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 			return ES_DECODE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 		/* Zero-extend for 32-bit operation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 		if (bytes == 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 			*reg_data = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 		memcpy(reg_data, ghcb->shared_buffer, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		/* MOVS instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	case 0xa4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		bytes = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 	case 0xa5:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		if (!bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			bytes = insn->opnd_bytes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		ret = vc_handle_mmio_movs(ctxt, bytes);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 		/* Two-Byte Opcodes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	case 0x0f:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 		ret = vc_handle_mmio_twobyte_ops(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 		ret = ES_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) }
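
/*
 * For reference, the one-byte opcodes decoded above (0x0f escapes to
 * the two-byte handler; listing derived from the cases):
 *
 *	0x88/0x89  MOV reg -> mem	MMIO write
 *	0xc6/0xc7  MOV imm -> mem	MMIO write
 *	0x8a/0x8b  MOV mem -> reg	MMIO read
 *	0xa4/0xa5  MOVS			split into read + write
 */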
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 					  struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 	long val, *reg = vc_insn_get_rm(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 	enum es_result ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 	if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 		return ES_DECODE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 	val = *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 	/* Upper 32 bits must be written as zeroes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 	if (val >> 32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 		ctxt->fi.vector = X86_TRAP_GP;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 		ctxt->fi.error_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 		return ES_EXCEPTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 	/* Clear out other reserved bits and set bit 10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 	val = (val & 0xffff23ffL) | BIT(10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 	/* Early non-zero writes to DR7 are not supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 	if (!data && (val & ~DR7_RESET_VALUE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 		return ES_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 	/* Using a value of 0 for ExitInfo1 means RAX holds the value */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 	ghcb_set_rax(ghcb, val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 	if (ret != ES_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 	if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 		data->dr7 = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 	return ES_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) }
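
/*
 * Worked example of the DR7 sanitizing above (input value assumed for
 * illustration):
 *
 *	val = 0x5d01;			// low enable bits plus junk in
 *					// reserved bits 10-12 and 14
 *	val = (val & 0xffff23ffL) | BIT(10);
 *	// -> 0x0501: reserved-zero bits cleared, must-be-one bit 10 set
 *
 * The accepted value is also cached in data->dr7 so that
 * vc_handle_dr7_read() can answer later reads without another VMGEXIT.
 */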
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 					 struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 	struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	long *reg = vc_insn_get_rm(ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) 	if (!reg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 		return ES_DECODE_FAILED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) 	if (data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) 		*reg = data->dr7;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) 		*reg = DR7_RESET_VALUE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) 	return ES_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) 				       struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) 	return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) 	enum es_result ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 	ghcb_set_rcx(ghcb, ctxt->regs->cx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 	if (ret != ES_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 		return ES_VMM_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 	ctxt->regs->ax = ghcb->save.rax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 	ctxt->regs->dx = ghcb->save.rdx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 	return ES_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
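
/*
 * The ghcb_set_*() / ghcb_*_is_valid() helpers used here are the GHCB
 * accessors generated in <asm/svm.h>: a setter stores the value in
 * ghcb->save and marks it in the valid bitmap, and the validity check
 * confirms the hypervisor flagged its reply registers the same way.
 * The general request/response pattern, as used in this function:
 *
 *	ghcb_set_rcx(ghcb, counter);			// input
 *	ret = sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, 0, 0);
 *	if (ret == ES_OK && ghcb_rax_is_valid(ghcb))
 *		result = ghcb->save.rax;		// output
 */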
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) static enum es_result vc_handle_monitor(struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 					struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	 * Treat it as a NOP and do not leak a physical address to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 	 * hypervisor.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 	return ES_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) static enum es_result vc_handle_mwait(struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 				      struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 	/* Treat the same as MONITOR/MONITORX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 	return ES_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 					struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 	enum es_result ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 	ghcb_set_rax(ghcb, ctxt->regs->ax);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 	ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 	if (x86_platform.hyper.sev_es_hcall_prepare)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 	ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 	if (ret != ES_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 	if (!ghcb_rax_is_valid(ghcb))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 		return ES_VMM_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 	ctxt->regs->ax = ghcb->save.rax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 	 * Call sev_es_hcall_finish() after regs->ax is already set.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 	 * This allows the hypervisor handler to overwrite it again if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 	 * necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 	if (x86_platform.hyper.sev_es_hcall_finish &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 	    !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 		return ES_VMM_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 	return ES_OK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) }
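
/*
 * A sketch of the hypervisor hook pair used above, modeled on the KVM
 * implementation (kvm_sev_es_hcall_prepare() in arch/x86/kernel/kvm.c),
 * where additional hypercall arguments travel in rbx/rcx/rdx/rsi; shown
 * for illustration only:
 *
 *	static void example_hcall_prepare(struct ghcb *ghcb,
 *					  struct pt_regs *regs)
 *	{
 *		// rax and cpl are already set by the caller above
 *		ghcb_set_rbx(ghcb, regs->bx);
 *		ghcb_set_rcx(ghcb, regs->cx);
 *		ghcb_set_rdx(ghcb, regs->dx);
 *		ghcb_set_rsi(ghcb, regs->si);
 *	}
 */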
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 					struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 	 * Calling exc_alignment_check() directly does not work, because it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 	 * enables IRQs and the GHCB is active. Forward the exception and call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 	 * it later from vc_forward_exception().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 	ctxt->fi.vector = X86_TRAP_AC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 	ctxt->fi.error_code = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	return ES_EXCEPTION;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 					 struct ghcb *ghcb,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) 					 unsigned long exit_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) 	enum es_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 	switch (exit_code) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 	case SVM_EXIT_READ_DR7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) 		result = vc_handle_dr7_read(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	case SVM_EXIT_WRITE_DR7:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 		result = vc_handle_dr7_write(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		result = vc_handle_trap_ac(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 	case SVM_EXIT_RDTSC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 	case SVM_EXIT_RDTSCP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 		result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 	case SVM_EXIT_RDPMC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 		result = vc_handle_rdpmc(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 	case SVM_EXIT_INVD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 		result = ES_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	case SVM_EXIT_CPUID:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		result = vc_handle_cpuid(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 	case SVM_EXIT_IOIO:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 		result = vc_handle_ioio(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) 	case SVM_EXIT_MSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 		result = vc_handle_msr(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) 	case SVM_EXIT_VMMCALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) 		result = vc_handle_vmmcall(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) 	case SVM_EXIT_WBINVD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) 		result = vc_handle_wbinvd(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) 	case SVM_EXIT_MONITOR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) 		result = vc_handle_monitor(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) 	case SVM_EXIT_MWAIT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) 		result = vc_handle_mwait(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	case SVM_EXIT_NPF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 		result = vc_handle_mmio(ghcb, ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 		 * Unexpected #VC exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 		result = ES_UNSUPPORTED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 	return result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) 	long error_code = ctxt->fi.error_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	int trapnr = ctxt->fi.vector;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 	ctxt->regs->orig_ax = ctxt->fi.error_code;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) 	switch (trapnr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) 	case X86_TRAP_GP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) 		exc_general_protection(ctxt->regs, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) 	case X86_TRAP_UD:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) 		exc_invalid_op(ctxt->regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) 	case X86_TRAP_PF:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) 		write_cr2(ctxt->fi.cr2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) 		exc_page_fault(ctxt->regs, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) 	case X86_TRAP_AC:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) 		exc_alignment_check(ctxt->regs, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) 		pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) static __always_inline bool on_vc_fallback_stack(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 	unsigned long sp = (unsigned long)regs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 	return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 	struct ghcb_state state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 	struct es_em_ctxt ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 	enum es_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 	struct ghcb *ghcb;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 	bool ret = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 	ghcb = __sev_get_ghcb(&state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 	vc_ghcb_invalidate(ghcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 	result = vc_init_em_ctxt(&ctxt, regs, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 	if (result == ES_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 		result = vc_handle_exitcode(&ctxt, ghcb, error_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 	__sev_put_ghcb(&state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 	/* Done - now check the result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 	switch (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 	case ES_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		vc_finish_insn(&ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 	case ES_UNSUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 		pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				   error_code, regs->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 		ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	case ES_VMM_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 		pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 				   error_code, regs->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 		ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) 	case ES_DECODE_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 		pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) 				   error_code, regs->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 		ret = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) 	case ES_EXCEPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) 		vc_forward_exception(&ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) 	case ES_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) 		/* Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) 		pr_emerg("Unknown result in %s():%d\n", __func__, result);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) 		 * Emulating the instruction which caused the #VC exception
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) 		 * failed - can't continue so print debug information
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) static __always_inline bool vc_is_db(unsigned long error_code)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) 	return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)  * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347)  * and will panic when an error happens.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	irqentry_state_t irq_state;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	 * With the current implementation it is always possible to switch to a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	 * safe stack because #VC exceptions only happen at known places, like
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 	 * intercepted instructions or accesses to MMIO areas/IO ports. They can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	 * also happen with code instrumentation when the hypervisor intercepts
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 	 * #DB, but the critical paths are forbidden to be instrumented, so #DB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 	 * exceptions currently also only happen in safe places.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	 * But keep this here in case the noinstr annotations are violated due
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 	 * to a bug elsewhere.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	if (unlikely(on_vc_fallback_stack(regs))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 		instrumentation_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 		panic("Can't handle #VC exception from unsupported context\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		instrumentation_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 	if (vc_is_db(error_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 		exc_debug(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 	irq_state = irqentry_nmi_enter(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 	instrumentation_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	if (!vc_raw_handle_exception(regs, error_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 		/* Show some debug info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 		show_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) 		/* Ask hypervisor to sev_es_terminate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 		sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		/* If that fails and we get here - just panic */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		panic("Returned from Terminate-Request to Hypervisor\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 	instrumentation_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 	irqentry_nmi_exit(regs, irq_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398)  * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399)  * and will kill the current task with SIGBUS when an error happens.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) 	 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) 	if (vc_is_db(error_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) 		noist_exc_debug(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) 	irqentry_enter_from_user_mode(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) 	instrumentation_begin();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 	if (!vc_raw_handle_exception(regs, error_code)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) 		 * Do not kill the machine if user-space triggered the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 		 * exception. Send SIGBUS instead and let user-space deal with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		 * it.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 		force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 	instrumentation_end();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) 	irqentry_exit_to_user_mode(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) 	unsigned long exit_code = regs->orig_ax;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) 	struct es_em_ctxt ctxt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) 	enum es_result result;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) 	/* Do initial setup or terminate the guest */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) 	if (unlikely(boot_ghcb == NULL && !sev_es_setup_ghcb()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) 		sev_es_terminate(GHCB_SEV_ES_REASON_GENERAL_REQUEST);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) 	vc_ghcb_invalidate(boot_ghcb);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	result = vc_init_em_ctxt(&ctxt, regs, exit_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 	if (result == ES_OK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) 		result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) 	/* Done - now check the result */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) 	switch (result) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) 	case ES_OK:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) 		vc_finish_insn(&ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) 	case ES_UNSUPPORTED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) 		early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) 				exit_code, regs->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) 	case ES_VMM_ERROR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) 		early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) 				exit_code, regs->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) 	case ES_DECODE_FAILED:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) 		early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) 				exit_code, regs->ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) 		goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) 	case ES_EXCEPTION:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) 		vc_early_forward_exception(&ctxt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) 	case ES_RETRY:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 		/* Nothing to do */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	show_regs(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	while (true)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		halt();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) }
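
/*
 * For context, a heavily trimmed sketch of the boot-time call site
 * (do_early_exception() in arch/x86/kernel/head64.c, reproduced here
 * schematically): the early exception path hands #VC to the handler
 * above with the SVM exit code already in regs->orig_ax.
 *
 *	void __init do_early_exception(struct pt_regs *regs, int trapnr)
 *	{
 *		if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
 *		    trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
 *			return;
 *		early_fixup_exception(regs, trapnr);
 *	}
 */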