Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5 Plus boards

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ASM__VIRT_H
#define __ASM__VIRT_H

/*
 * The arm64 hcall implementation uses x0 to specify the hcall
 * number. A value less than HVC_STUB_HCALL_NR indicates a special
 * hcall, such as set vector. Any other value is handled in a
 * hypervisor specific way.
 *
 * The hypercall is allowed to clobber any of the caller-saved
 * registers (x0-x18), so it is advisable to use it through the
 * indirection of a function call (as implemented in hyp-stub.S).
 */

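/*
 * Illustrative sketch (not part of the original header): what an hcall
 * issued against the stub looks like from EL1. The helper name is
 * hypothetical, and the clobber list below is deliberately abbreviated;
 * a real caller must treat all of x0-x18 as clobbered, which is exactly
 * why the indirection through a function call is recommended (the AAPCS
 * then makes the compiler assume those registers are dead).
 *
 *	static u64 hyp_stub_hcall(u64 nr, u64 arg)	// hypothetical
 *	{
 *		register u64 x0 asm("x0") = nr;		// hcall number
 *		register u64 x1 asm("x1") = arg;	// hcall argument
 *
 *		asm volatile("hvc #0" : "+r" (x0) : "r" (x1) : "memory");
 *		return x0;	// result, or HVC_STUB_ERR
 *	}
 */
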
/*
 * HVC_SET_VECTORS - Set the value of the vbar_el2 register.
 *
 * @x1: Physical address of the new vector table.
 */
#define HVC_SET_VECTORS 0

/*
 * HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
 */
#define HVC_SOFT_RESTART 1

/*
 * HVC_RESET_VECTORS - Restore the vectors to the original HYP stubs
 */
#define HVC_RESET_VECTORS 2

/*
 * HVC_VHE_RESTART - Upgrade the CPU from EL1 to EL2, if possible
 */
#define HVC_VHE_RESTART	3

/* Max number of HYP stub hypercalls */
#define HVC_STUB_HCALL_NR 4

/* Error returned when an invalid stub number is passed into x0 */
#define HVC_STUB_ERR	0xbadca11

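/*
 * Conceptual sketch of the stub's dispatch on the numbers above (the real
 * version is a chain of compares on x0, in assembly, in hyp-stub.S):
 *
 *	switch (x0) {
 *	case HVC_SET_VECTORS:	// vbar_el2 = x1
 *	case HVC_SOFT_RESTART:	// branch to the entry point passed in x1
 *	case HVC_RESET_VECTORS:	// vbar_el2 = the original stub vectors
 *	case HVC_VHE_RESTART:	// try to re-enter the kernel at EL2
 *		...
 *	default:
 *		x0 = HVC_STUB_ERR;	// anything >= HVC_STUB_HCALL_NR
 *	}
 */
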
#define BOOT_CPU_MODE_EL1	(0xe11)
#define BOOT_CPU_MODE_EL2	(0xe12)

#ifndef __ASSEMBLY__

#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/sysreg.h>
#include <asm/cpufeature.h>

/*
 * __boot_cpu_mode records what mode CPUs were booted in.
 * A correctly-implemented bootloader must start all CPUs in the same mode:
 * in that case, both 32-bit halves of __boot_cpu_mode will contain the
 * same value (BOOT_CPU_MODE_EL1 if booted in EL1, BOOT_CPU_MODE_EL2 if
 * booted in EL2).
 *
 * Should the bootloader fail to do this, the two values will be different.
 * This allows the kernel to flag an error when the secondaries have come up.
 */
extern u32 __boot_cpu_mode[2];

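/*
 * Worked example (following the recording convention in head.S): if every
 * CPU enters the kernel at EL2, both halves end up as BOOT_CPU_MODE_EL2,
 * i.e. { 0xe12, 0xe12 }. If a secondary instead comes up at EL1, the
 * halves differ, e.g. { 0xe11, 0xe12 }, and is_hyp_mode_mismatched()
 * below returns true.
 */
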
void __hyp_set_vectors(phys_addr_t phys_vector_base);
void __hyp_reset_vectors(void);

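/*
 * Illustrative use (a sketch only; the vector table name is hypothetical):
 * a hypervisor installs its own EL2 vectors via the HVC_SET_VECTORS stub
 * call and hands control back on teardown:
 *
 *	__hyp_set_vectors(virt_to_phys(my_el2_vectors));
 *	...				// EL2 traps now land in my_el2_vectors
 *	__hyp_reset_vectors();		// restore the original hyp stubs
 */
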
DECLARE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);

/* Reports the availability of HYP mode */
static inline bool is_hyp_mode_available(void)
{
	/*
	 * If KVM protected mode is initialized, all CPUs must have been booted
	 * in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1.
	 */
	if (IS_ENABLED(CONFIG_KVM) &&
	    static_branch_likely(&kvm_protected_mode_initialized))
		return true;

	return (__boot_cpu_mode[0] == BOOT_CPU_MODE_EL2 &&
		__boot_cpu_mode[1] == BOOT_CPU_MODE_EL2);
}

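/*
 * Typical init-time pattern (hedged sketch; KVM's real checks live in
 * arch/arm64/kvm/arm.c):
 *
 *	if (!is_hyp_mode_available()) {
 *		pr_info("HYP mode not available\n");
 *		return -ENODEV;
 *	}
 */
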
/* Check if the bootloader has booted CPUs in different modes */
static inline bool is_hyp_mode_mismatched(void)
{
	/*
	 * If KVM protected mode is initialized, all CPUs must have been booted
	 * in EL2. Avoid checking __boot_cpu_mode as CPUs now come up in EL1.
	 */
	if (IS_ENABLED(CONFIG_KVM) &&
	    static_branch_likely(&kvm_protected_mode_initialized))
		return false;

	return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}

static inline bool is_kernel_in_hyp_mode(void)
{
	return read_sysreg(CurrentEL) == CurrentEL_EL2;
}

static __always_inline bool has_vhe(void)
{
	/*
	 * Code only run in VHE/NVHE hyp context can assume VHE is present or
	 * absent. Otherwise, fall back to the cpucaps check.
	 */
	if (is_vhe_hyp_code())
		return true;
	else if (is_nvhe_hyp_code())
		return false;
	else
		return cpus_have_final_cap(ARM64_HAS_VIRT_HOST_EXTN);
}

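/*
 * Note (illustrative): upstream, is_vhe_hyp_code()/is_nvhe_hyp_code() are
 * __is_defined() tests on the __KVM_{VHE,NVHE}_HYPERVISOR__ build macros,
 * so inside has_vhe() they reduce to build-time constants and the branch
 * vanishes in hyp code. For example, in a translation unit built as nVHE
 * hyp code:
 *
 *	if (has_vhe())			// constant-folds to 'if (false)'
 *		vhe_only_setup();	// hypothetical; compiled out
 */
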
static __always_inline bool is_protected_kvm_enabled(void)
{
	if (is_vhe_hyp_code())
		return false;
	else
		return cpus_have_final_cap(ARM64_KVM_PROTECTED_MODE);
}

#endif /* __ASSEMBLY__ */

#endif /* ! __ASM__VIRT_H */