Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  * Linux-specific definitions for managing interactions with Microsoft's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  * Hyper-V hypervisor. The definitions in this file are architecture
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * that are specific to architecture <arch>.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * Definitions that are specified in the Hyper-V Top Level Functional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * Spec (TLFS) should not go in this file, but should instead go in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * hyperv-tlfs.h.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * Copyright (C) 2019, Microsoft, Inc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * Author : Michael Kelley <mikelley@microsoft.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #ifndef _ASM_GENERIC_MSHYPERV_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #define _ASM_GENERIC_MSHYPERV_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/atomic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/cpumask.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <asm/hyperv-tlfs.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
/*
 * Hypervisor capability/identity data cached at boot — presumably
 * populated from the Hyper-V CPUID leaves by arch setup code (see
 * arch/<arch>/include/asm/mshyperv.h); confirm against the arch side.
 */
struct ms_hyperv_info {
	u32 features;		/* hypervisor feature flags */
	u32 misc_features;	/* additional/misc feature flags */
	u32 hints;		/* recommended-behavior hints */
	u32 nested_features;	/* features available to nested guests */
	u32 max_vp_index;	/* highest virtual processor index */
	u32 max_lp_index;	/* highest logical processor index */
};
/* Single global instance; defined elsewhere (arch Hyper-V init code). */
extern struct ms_hyperv_info ms_hyperv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
/*
 * Hypercall entry points, implemented per-architecture.
 * @control selects the hypercall; input/output buffers are passed by
 * address (the fast variant passes an 8-byte input in registers).
 */
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) /* Generate the guest OS identifier as described in the Hyper-V TLFS */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) static inline  __u64 generate_guest_id(__u64 d_info1, __u64 kernel_version,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 				       __u64 d_info2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	__u64 guest_id = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	guest_id = (((__u64)HV_LINUX_VENDOR_ID) << 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 	guest_id |= (d_info1 << 48);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 	guest_id |= (kernel_version << 16);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 	guest_id |= d_info2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 	return guest_id;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) /* Free the message slot and signal end-of-message if required */
/*
 * Free the message slot and signal end-of-message if required.
 * @msg: the per-CPU synthetic message page slot being released
 * @old_msg_type: the message type we believe is currently in the slot
 */
static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
{
	/*
	 * On crash we're reading some other CPU's message page and we need
	 * to be careful: this other CPU may already have cleared the header
	 * and the host may already have delivered some other message there.
	 * In case we blindly write msg->header.message_type we're going
	 * to lose it. We can still lose a message of the same type but
	 * we count on the fact that there can only be one
	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
	 * on crash.
	 */
	if (cmpxchg(&msg->header.message_type, old_msg_type,
		    HVMSG_NONE) != old_msg_type)
		return;

	/*
	 * The cmpxchg() above does an implicit memory barrier to
	 * ensure the write to MessageType (ie set to
	 * HVMSG_NONE) happens before we read the
	 * MessagePending and EOMing. Otherwise, the EOMing
	 * will not deliver any more messages since there is
	 * no empty slot.
	 */
	if (msg->header.message_flags.msg_pending) {
		/*
		 * This will cause message queue rescan to
		 * possibly deliver another msg from the
		 * hypervisor
		 */
		hv_signal_eom();
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 
/* VMbus interrupt registration — presumably arch-implemented; verify. */
int hv_setup_vmbus_irq(int irq, void (*handler)(void));
void hv_remove_vmbus_irq(void);
void hv_enable_vmbus_irq(void);
void hv_disable_vmbus_irq(void);

/* Hooks invoked on kexec and on panic/crash, respectively. */
void hv_setup_kexec_handler(void (*handler)(void));
void hv_remove_kexec_handler(void);
void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
void hv_remove_crash_handler(void);

/* IRQ number used for VMbus; defined elsewhere (VMbus driver code). */
extern int vmbus_interrupt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #if IS_ENABLED(CONFIG_HYPERV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)  * Hypervisor's notion of virtual processor ID is different from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)  * Linux' notion of CPU ID. This information can only be retrieved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108)  * in the context of the calling CPU. Setup a map for easy access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109)  * to this information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110)  */
extern u32 *hv_vp_index;	/* per-CPU table: Linux CPU number -> VP number */
extern u32 hv_max_vp_index;	/* highest VP number across all present CPUs */

/* Sentinel value for an uninitialized entry in hv_vp_index array */
#define VP_INVAL	U32_MAX
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
/**
 * hv_cpu_number_to_vp_number() - Map CPU to VP.
 * @cpu_number: CPU number in Linux terms
 *
 * This function returns the mapping between the Linux processor
 * number and the hypervisor's virtual processor number, useful
 * in making hypercalls and such that talk about specific
 * processors.
 *
 * Return: Virtual processor number in Hyper-V terms, or VP_INVAL
 * if the entry for this CPU has never been initialized.
 */
static inline int hv_cpu_number_to_vp_number(int cpu_number)
{
	/* No bounds check: caller must pass a valid CPU number. */
	return hv_vp_index[cpu_number];
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 
/*
 * cpumask_to_vpset() - Convert a Linux cpumask into a Hyper-V vpset.
 * @vpset: output sparse VP set (64 VPs per 64-bit bank)
 * @cpus:  input mask of Linux CPUs
 *
 * Return: the number of banks used (>= 1 even for an empty mask),
 * 0 if the VP index space needs more than 64 banks (unrepresentable),
 * or -1 if some CPU in @cpus has no valid VP mapping yet (VP_INVAL).
 */
static inline int cpumask_to_vpset(struct hv_vpset *vpset,
				    const struct cpumask *cpus)
{
	int cpu, vcpu, vcpu_bank, vcpu_offset, nr_bank = 1;

	/* valid_bank_mask can represent up to 64 banks */
	if (hv_max_vp_index / 64 >= 64)
		return 0;

	/*
	 * Clear all banks up to the maximum possible bank as hv_tlb_flush_ex
	 * structs are not cleared between calls, we risk flushing unneeded
	 * vCPUs otherwise.
	 */
	for (vcpu_bank = 0; vcpu_bank <= hv_max_vp_index / 64; vcpu_bank++)
		vpset->bank_contents[vcpu_bank] = 0;

	/*
	 * Some banks may end up being empty but this is acceptable.
	 */
	for_each_cpu(cpu, cpus) {
		vcpu = hv_cpu_number_to_vp_number(cpu);
		if (vcpu == VP_INVAL)
			return -1;
		vcpu_bank = vcpu / 64;
		vcpu_offset = vcpu % 64;
		__set_bit(vcpu_offset, (unsigned long *)
			  &vpset->bank_contents[vcpu_bank]);
		/* Track the highest bank touched so far. */
		if (vcpu_bank >= nr_bank)
			nr_bank = vcpu_bank + 1;
	}
	/* Mark banks [0, nr_bank) as valid in the set. */
	vpset->valid_bank_mask = GENMASK_ULL(nr_bank - 1, 0);
	return nr_bank;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
bool hv_is_hibernation_supported(void);
void hyperv_cleanup(void);
#else /* CONFIG_HYPERV */
/* No-op stubs so callers still build when Hyper-V support is disabled. */
static inline bool hv_is_hyperv_initialized(void) { return false; }
static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
#endif /* CONFIG_HYPERV */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
#if IS_ENABLED(CONFIG_HYPERV)
/* stimer0 interrupt plumbing — presumably arch-implemented; verify. */
extern int hv_setup_stimer0_irq(int *irq, int *vector, void (*handler)(void));
extern void hv_remove_stimer0_irq(int irq);
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #endif