Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5, 5B, and 5 Plus boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-or-later
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*  Paravirtualization interfaces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)     Copyright (C) 2006 Rusty Russell IBM Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)     2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/errno.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/init.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/efi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/bcd.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/highmem.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/kprobes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/paravirt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <asm/debugreg.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <asm/desc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <asm/time.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <asm/pgalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <asm/irq.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <asm/delay.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <asm/fixmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <asm/apic.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) #include <asm/tlbflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include <asm/timer.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include <asm/special_insns.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) #include <asm/tlb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) #include <asm/io_bitmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 
/*
 * nop stub, which must not clobber anything *including the stack* to
 * avoid confusing the entry prologues.
 */
extern void _paravirt_nop(void);
/*
 * Written in inline asm rather than as an empty C function so that it is
 * exactly a bare RET in .entry.text, with no compiler-generated frame
 * setup that could touch registers or the stack.
 */
asm (".pushsection .entry.text, \"ax\"\n"
     ".global _paravirt_nop\n"
     "_paravirt_nop:\n\t"
     "ret\n\t"
     ".size _paravirt_nop, . - _paravirt_nop\n\t"
     ".type _paravirt_nop, @function\n\t"
     ".popsection");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 
/* Announce at boot which paravirt platform (pv_info.name) we run on. */
void __init default_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       pv_info.name);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 
/* Undefined instruction for dealing with missing ops pointers. */
static const unsigned char ud2a[] = { 0x0f, 0x0b };

/*
 * Byte layout of a 5-byte x86 rel32 CALL/JMP: one opcode byte followed
 * by a 32-bit displacement.  Packed so sizeof matches the encoding.
 */
struct branch {
	unsigned char opcode;
	u32 delta;	/* signed rel32 displacement, stored as u32 */
} __attribute__((packed));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) static unsigned paravirt_patch_call(void *insn_buff, const void *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) 				    unsigned long addr, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) 	const int call_len = 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 	struct branch *b = insn_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 	unsigned long delta = (unsigned long)target - (addr+call_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 	if (len < call_len) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) 		pr_warn("paravirt: Failed to patch indirect CALL at %ps\n", (void *)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) 		/* Kernel might not be viable if patching fails, bail out: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 		BUG_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	b->opcode = 0xe8; /* call */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 	b->delta = delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	BUILD_BUG_ON(sizeof(*b) != call_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	return call_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) #ifdef CONFIG_PARAVIRT_XXL
/*
 * identity function, which can be inlined
 *
 * Used as the native implementation of the pte_val/make_pte style ops
 * (see PTE_IDENT below); paravirt_patch_default() recognizes this exact
 * address and inlines it via paravirt_patch_ident_64().
 */
u64 notrace _paravirt_ident_64(u64 x)
{
	return x;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) static unsigned paravirt_patch_jmp(void *insn_buff, const void *target,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 				   unsigned long addr, unsigned len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 	struct branch *b = insn_buff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 	unsigned long delta = (unsigned long)target - (addr+5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 	if (len < 5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) #ifdef CONFIG_RETPOLINE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 		WARN_ONCE(1, "Failing to patch indirect JMP in %ps\n", (void *)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) 		return len;	/* call too long for patch site */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 	b->opcode = 0xe9;	/* jmp */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 	b->delta = delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	return 5;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 
/* Defaults to true; turned off below when not running under a hypervisor. */
DEFINE_STATIC_KEY_TRUE(virt_spin_lock_key);

/*
 * On bare metal (no X86_FEATURE_HYPERVISOR CPUID bit) the virt spinlock
 * path is unnecessary, so disable the key and use native qspinlocks.
 */
void __init native_pv_lock_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
		static_branch_disable(&virt_spin_lock_key);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
/*
 * Default patcher for a pv_ops call site.
 *
 * @type is the pointer-sized index of the op within struct
 * paravirt_patch_template, which lets us recover the target function
 * straight out of pv_ops.  Depending on that target:
 *   - NULL op:            patch in ud2a so a missing op traps;
 *   - _paravirt_nop:      patch to nothing at all (0 bytes);
 *   - _paravirt_ident_64: inline the 64-bit identity (PARAVIRT_XXL only);
 *   - cpu.iret / cpu.usergs_sysret64: reached by JMP, not CALL
 *                         (they never return; PARAVIRT_XXL only);
 *   - anything else:      a direct CALL to the target.
 *
 * Returns the number of bytes patched into @insn_buff.
 */
unsigned paravirt_patch_default(u8 type, void *insn_buff,
				unsigned long addr, unsigned len)
{
	/*
	 * Neat trick to map patch type back to the call within the
	 * corresponding structure.
	 */
	void *opfunc = *((void **)&pv_ops + type);
	unsigned ret;

	if (opfunc == NULL)
		/* If there's no function, patch it with a ud2a (BUG) */
		ret = paravirt_patch_insns(insn_buff, len, ud2a, ud2a+sizeof(ud2a));
	else if (opfunc == _paravirt_nop)
		ret = 0;

#ifdef CONFIG_PARAVIRT_XXL
	/* identity functions just return their single argument */
	else if (opfunc == _paravirt_ident_64)
		ret = paravirt_patch_ident_64(insn_buff, len);

	else if (type == PARAVIRT_PATCH(cpu.iret) ||
		 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
		/* If operation requires a jmp, then jmp */
		ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
#endif
	else
		/* Otherwise call the function. */
		ret = paravirt_patch_call(insn_buff, opfunc, addr, len);

	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) unsigned paravirt_patch_insns(void *insn_buff, unsigned len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 			      const char *start, const char *end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	unsigned insn_len = end - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 	/* Alternative instruction is too large for the patch site and we cannot continue: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	BUG_ON(insn_len > len || start == NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	memcpy(insn_buff, start, insn_len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 	return insn_len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) 
/* Steal-time accounting keys — presumably enabled by hypervisor guest
 * setup code elsewhere; nothing in this file flips them. */
struct static_key paravirt_steal_enabled;
struct static_key paravirt_steal_rq_enabled;

/* On bare hardware no time is ever stolen by a hypervisor. */
static u64 native_steal_clock(int cpu)
{
	return 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 
/* These are in entry.S */
extern void native_iret(void);
extern void native_usergs_sysret64(void);

/* Resource spanning the entire legacy I/O port space; requested by
 * paravirt_disable_iospace() to lock legacy drivers out. */
static struct resource reserve_ioports = {
	.start = 0,
	.end = IO_SPACE_LIMIT,
	.name = "paravirt-ioport",
	.flags = IORESOURCE_IO | IORESOURCE_BUSY,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 
/*
 * Reserve the whole legacy IO space to prevent any legacy drivers
 * from wasting time probing for their hardware.  This is a fairly
 * brute-force approach to disabling all non-virtual drivers.
 *
 * Note that this must be called very early to have any effect.
 *
 * Propagates request_resource()'s return value (0 on success).
 */
int paravirt_disable_iospace(void)
{
	return request_resource(&ioport_resource, &reserve_ioports);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 
/* Per-CPU record of which lazy batching mode (if any) is active. */
static DEFINE_PER_CPU(enum paravirt_lazy_mode, paravirt_lazy_mode) = PARAVIRT_LAZY_NONE;

/* Enter lazy @mode on this CPU; nesting lazy modes is a bug. */
static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	this_cpu_write(paravirt_lazy_mode, mode);
}

/* Leave lazy @mode on this CPU; must match the mode previously entered. */
static void leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(this_cpu_read(paravirt_lazy_mode) != mode);

	this_cpu_write(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 
/* Begin lazy (batched) MMU updates on this CPU. */
void paravirt_enter_lazy_mmu(void)
{
	enter_lazy(PARAVIRT_LAZY_MMU);
}

/* End lazy (batched) MMU updates on this CPU. */
void paravirt_leave_lazy_mmu(void)
{
	leave_lazy(PARAVIRT_LAZY_MMU);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
/*
 * Flush any pending lazy MMU updates by bouncing out of and straight
 * back into lazy MMU mode.  No-op unless this CPU is currently in
 * PARAVIRT_LAZY_MMU mode; preemption is disabled so the mode cannot
 * change underneath us.
 */
void paravirt_flush_lazy_mmu(void)
{
	preempt_disable();

	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		arch_enter_lazy_mmu_mode();
	}

	preempt_enable();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) #ifdef CONFIG_PARAVIRT_XXL
/*
 * Called at the start of a context switch.  If the outgoing task was in
 * the middle of a lazy MMU batch, flush it and flag the task
 * (TIF_LAZY_MMU_UPDATES) so paravirt_end_context_switch() can restore
 * the mode later; then enter lazy CPU mode for the switch itself.
 * Must run with preemption disabled.
 */
void paravirt_start_context_switch(struct task_struct *prev)
{
	BUG_ON(preemptible());

	if (this_cpu_read(paravirt_lazy_mode) == PARAVIRT_LAZY_MMU) {
		arch_leave_lazy_mmu_mode();
		set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES);
	}
	enter_lazy(PARAVIRT_LAZY_CPU);
}

/*
 * Called at the end of a context switch: leave lazy CPU mode and, if
 * the incoming task was interrupted mid lazy-MMU batch (flag set above),
 * re-enter lazy MMU mode on its behalf.  Must run with preemption
 * disabled.
 */
void paravirt_end_context_switch(struct task_struct *next)
{
	BUG_ON(preemptible());

	leave_lazy(PARAVIRT_LAZY_CPU);

	if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES))
		arch_enter_lazy_mmu_mode();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) 	if (in_interrupt())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 		return PARAVIRT_LAZY_NONE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) 	return this_cpu_read(paravirt_lazy_mode);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) 
/* Default platform description; its .name is printed by default_banner(). */
struct pv_info pv_info = {
	.name = "bare hardware",
#ifdef CONFIG_PARAVIRT_XXL
	.extra_user_64bit_cs = __USER_CS,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 
/* 64-bit pagetable entries */
#define PTE_IDENT	__PV_IS_CALLEE_SAVE(_paravirt_ident_64)

/*
 * Native (bare-hardware) implementations for every paravirt op.
 * paravirt_patch_default() above patches call sites back to these
 * natives — or inlines/elides them for the identity and nop entries.
 * Hypervisor guest code presumably overrides individual entries; none
 * of that happens in this file.
 */
struct paravirt_patch_template pv_ops = {
	/* Init ops. */
	.init.patch		= native_patch,

	/* Time ops. */
	.time.sched_clock	= native_sched_clock,
	.time.steal_clock	= native_steal_clock,

	/* Cpu ops. */
	.cpu.io_delay		= native_io_delay,

#ifdef CONFIG_PARAVIRT_XXL
	.cpu.cpuid		= native_cpuid,
	.cpu.get_debugreg	= native_get_debugreg,
	.cpu.set_debugreg	= native_set_debugreg,
	.cpu.read_cr0		= native_read_cr0,
	.cpu.write_cr0		= native_write_cr0,
	.cpu.write_cr4		= native_write_cr4,
	.cpu.wbinvd		= native_wbinvd,
	.cpu.read_msr		= native_read_msr,
	.cpu.write_msr		= native_write_msr,
	.cpu.read_msr_safe	= native_read_msr_safe,
	.cpu.write_msr_safe	= native_write_msr_safe,
	.cpu.read_pmc		= native_read_pmc,
	.cpu.load_tr_desc	= native_load_tr_desc,
	.cpu.set_ldt		= native_set_ldt,
	.cpu.load_gdt		= native_load_gdt,
	.cpu.load_idt		= native_load_idt,
	.cpu.store_tr		= native_store_tr,
	.cpu.load_tls		= native_load_tls,
	.cpu.load_gs_index	= native_load_gs_index,
	.cpu.write_ldt_entry	= native_write_ldt_entry,
	.cpu.write_gdt_entry	= native_write_gdt_entry,
	.cpu.write_idt_entry	= native_write_idt_entry,

	.cpu.alloc_ldt		= paravirt_nop,
	.cpu.free_ldt		= paravirt_nop,

	.cpu.load_sp0		= native_load_sp0,

	/* iret/sysret never return: patched as JMPs, not CALLs (see above). */
	.cpu.usergs_sysret64	= native_usergs_sysret64,
	.cpu.iret		= native_iret,

#ifdef CONFIG_X86_IOPL_IOPERM
	.cpu.invalidate_io_bitmap	= native_tss_invalidate_io_bitmap,
	.cpu.update_io_bitmap		= native_tss_update_io_bitmap,
#endif

	.cpu.start_context_switch	= paravirt_nop,
	.cpu.end_context_switch		= paravirt_nop,

	/* Irq ops. */
	.irq.save_fl		= __PV_IS_CALLEE_SAVE(native_save_fl),
	.irq.restore_fl		= __PV_IS_CALLEE_SAVE(native_restore_fl),
	.irq.irq_disable	= __PV_IS_CALLEE_SAVE(native_irq_disable),
	.irq.irq_enable		= __PV_IS_CALLEE_SAVE(native_irq_enable),
	.irq.safe_halt		= native_safe_halt,
	.irq.halt		= native_halt,
#endif /* CONFIG_PARAVIRT_XXL */

	/* Mmu ops. */
	.mmu.flush_tlb_user	= native_flush_tlb_local,
	.mmu.flush_tlb_kernel	= native_flush_tlb_global,
	.mmu.flush_tlb_one_user	= native_flush_tlb_one_user,
	.mmu.flush_tlb_others	= native_flush_tlb_others,
	.mmu.tlb_remove_table	=
			(void (*)(struct mmu_gather *, void *))tlb_remove_page,

	.mmu.exit_mmap		= paravirt_nop,

#ifdef CONFIG_PARAVIRT_XXL
	.mmu.read_cr2		= __PV_IS_CALLEE_SAVE(native_read_cr2),
	.mmu.write_cr2		= native_write_cr2,
	.mmu.read_cr3		= __native_read_cr3,
	.mmu.write_cr3		= native_write_cr3,

	.mmu.pgd_alloc		= __paravirt_pgd_alloc,
	.mmu.pgd_free		= paravirt_nop,

	.mmu.alloc_pte		= paravirt_nop,
	.mmu.alloc_pmd		= paravirt_nop,
	.mmu.alloc_pud		= paravirt_nop,
	.mmu.alloc_p4d		= paravirt_nop,
	.mmu.release_pte	= paravirt_nop,
	.mmu.release_pmd	= paravirt_nop,
	.mmu.release_pud	= paravirt_nop,
	.mmu.release_p4d	= paravirt_nop,

	.mmu.set_pte		= native_set_pte,
	.mmu.set_pmd		= native_set_pmd,

	.mmu.ptep_modify_prot_start	= __ptep_modify_prot_start,
	.mmu.ptep_modify_prot_commit	= __ptep_modify_prot_commit,

	.mmu.set_pud		= native_set_pud,

	/* Value <-> representation conversions are identity on bare metal. */
	.mmu.pmd_val		= PTE_IDENT,
	.mmu.make_pmd		= PTE_IDENT,

	.mmu.pud_val		= PTE_IDENT,
	.mmu.make_pud		= PTE_IDENT,

	.mmu.set_p4d		= native_set_p4d,

#if CONFIG_PGTABLE_LEVELS >= 5
	.mmu.p4d_val		= PTE_IDENT,
	.mmu.make_p4d		= PTE_IDENT,

	.mmu.set_pgd		= native_set_pgd,
#endif /* CONFIG_PGTABLE_LEVELS >= 5 */

	.mmu.pte_val		= PTE_IDENT,
	.mmu.pgd_val		= PTE_IDENT,

	.mmu.make_pte		= PTE_IDENT,
	.mmu.make_pgd		= PTE_IDENT,

	.mmu.dup_mmap		= paravirt_nop,
	.mmu.activate_mm	= paravirt_nop,

	.mmu.lazy_mode = {
		.enter		= paravirt_nop,
		.leave		= paravirt_nop,
		.flush		= paravirt_nop,
	},

	.mmu.set_fixmap		= native_set_fixmap,
#endif /* CONFIG_PARAVIRT_XXL */

#if defined(CONFIG_PARAVIRT_SPINLOCKS)
	/* Lock ops. */
#ifdef CONFIG_SMP
	.lock.queued_spin_lock_slowpath	= native_queued_spin_lock_slowpath,
	.lock.queued_spin_unlock	=
				PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.lock.wait			= paravirt_nop,
	.lock.kick			= paravirt_nop,
	.lock.vcpu_is_preempted		=
				PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) 
#ifdef CONFIG_PARAVIRT_XXL
/* At this point, native_get/set_debugreg has real function entries */
NOKPROBE_SYMBOL(native_get_debugreg);
NOKPROBE_SYMBOL(native_set_debugreg);
NOKPROBE_SYMBOL(native_load_idt);
#endif

/* pv_ops is exported for all modules; pv_info is GPL-only. */
EXPORT_SYMBOL(pv_ops);
EXPORT_SYMBOL_GPL(pv_info);