Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 sources for the Orange Pi 5/5B/5 Plus boards.
This listing is a git-blame view of `arch/x86/include/asm/text-patching.h`.

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) #ifndef _ASM_X86_TEXT_PATCHING_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #define _ASM_X86_TEXT_PATCHING_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/stddef.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
struct paravirt_patch_site;
#ifdef CONFIG_PARAVIRT
/* Patch all paravirt call sites in the range [start, end). */
void apply_paravirt(struct paravirt_patch_site *start,
		    struct paravirt_patch_site *end);
#else
/*
 * Without paravirtualization there is nothing to patch: the stub is a
 * no-op and the .parainstructions section bounds collapse to NULL.
 */
static inline void apply_paravirt(struct paravirt_patch_site *start,
				  struct paravirt_patch_site *end)
{}
#define __parainstructions	NULL
#define __parainstructions_end	NULL
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  * Currently, the max observed size in the kernel code is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * JUMP_LABEL_NOP_SIZE/RELATIVEJUMP_SIZE, which are 5.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * Raise it if needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #define POKE_MAX_OPCODE_SIZE	5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) extern void text_poke_early(void *addr, const void *opcode, size_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)  * Clear and restore the kernel write-protection flag on the local CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)  * Allows the kernel to edit read-only pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)  * Side-effect: any interrupt handler running between save and restore will have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)  * the ability to write to read-only pages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)  * Warning:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37)  * Code patching in the UP case is safe if NMIs and MCE handlers are stopped and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38)  * no thread can be preempted in the instructions being modified (no iret to an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39)  * invalid instruction possible) or if the instructions are changed from a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40)  * consistent state to another consistent state atomically.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41)  * On the local CPU you need to be protected against NMI or MCE handlers seeing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42)  * an inconsistent instruction while you patch.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) extern void *text_poke(void *addr, const void *opcode, size_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) extern void text_poke_sync(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) extern void *text_poke_kgdb(void *addr, const void *opcode, size_t len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) extern int poke_int3_handler(struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) extern void text_poke_bp(void *addr, const void *opcode, size_t len, const void *emulate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) extern void text_poke_queue(void *addr, const void *opcode, size_t len, const void *emulate);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) extern void text_poke_finish(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #define INT3_INSN_SIZE		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) #define INT3_INSN_OPCODE	0xCC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) #define RET_INSN_SIZE		1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) #define RET_INSN_OPCODE		0xC3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) #define CALL_INSN_SIZE		5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) #define CALL_INSN_OPCODE	0xE8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) #define JMP32_INSN_SIZE		5
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) #define JMP32_INSN_OPCODE	0xE9
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) #define JMP8_INSN_SIZE		2
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) #define JMP8_INSN_OPCODE	0xEB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) #define DISP32_SIZE		4
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) static __always_inline int text_opcode_size(u8 opcode)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 	int size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) #define __CASE(insn)	\
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) 	case insn##_INSN_OPCODE: size = insn##_INSN_SIZE; break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) 	switch(opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) 	__CASE(INT3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) 	__CASE(RET);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 	__CASE(CALL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) 	__CASE(JMP32);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) 	__CASE(JMP8);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) #undef __CASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) 	return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
/*
 * Byte image of a single patchable instruction, viewable either as raw
 * bytes or as an opcode byte immediately followed by a 32-bit
 * displacement (the CALL/JMP32 encoding shape).
 */
union text_poke_insn {
	u8 text[POKE_MAX_OPCODE_SIZE];
	struct {
		u8 opcode;
		s32 disp;	/* packed: starts right after the opcode byte */
	} __attribute__((packed));
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
/*
 * Generate the byte image of a single INT3/RET/CALL/JMP32/JMP8
 * instruction located at @addr and targeting @dest, and return a
 * pointer to those bytes.
 *
 * The buffer is function-local static storage; since the function is
 * __always_inline, each call site gets its own copy ("per instance").
 * The returned pointer is only valid until the same call site runs
 * again.
 */
static __always_inline
void *text_gen_insn(u8 opcode, const void *addr, const void *dest)
{
	static union text_poke_insn insn; /* per instance */
	int size = text_opcode_size(opcode);

	insn.opcode = opcode;

	if (size > 1) {
		/* Displacement is relative to the end of the new instruction. */
		insn.disp = (long)dest - (long)(addr + size);
		if (size == 2) {
			/*
			 * Ensure that for JMP8 the displacement
			 * actually fits the signed byte.
			 */
			BUG_ON((insn.disp >> 31) != (insn.disp >> 7));
		}
	}

	return &insn.text;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) extern int after_bootmem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) extern __ro_after_init struct mm_struct *poking_mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) extern __ro_after_init unsigned long poking_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #ifndef CONFIG_UML_X86
/*
 * Emulate a JMP for the context interrupted by an INT3: execution
 * resumes at @ip when the exception returns.
 */
static __always_inline
void int3_emulate_jmp(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) void int3_emulate_push(struct pt_regs *regs, unsigned long val)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	 * The int3 handler in entry_64.S adds a gap between the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	 * stack where the break point happened, and the saving of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) 	 * pt_regs. We can extend the original stack because of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 	 * this gap. See the idtentry macro's create_gap option.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140) 	 * Similarly entry_32.S will have a gap on the stack for (any) hardware
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) 	 * exception and pt_regs; see FIXUP_FRAME.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 	regs->sp -= sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 	*(unsigned long *)regs->sp = val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) unsigned long int3_emulate_pop(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	unsigned long val = *(unsigned long *)regs->sp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	regs->sp += sizeof(unsigned long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) void int3_emulate_call(struct pt_regs *regs, unsigned long func)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	int3_emulate_push(regs, regs->ip - INT3_INSN_SIZE + CALL_INSN_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) 	int3_emulate_jmp(regs, func);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) static __always_inline
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) void int3_emulate_ret(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	unsigned long ip = int3_emulate_pop(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	int3_emulate_jmp(regs, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #endif /* !CONFIG_UML_X86 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #endif /* _ASM_X86_TEXT_PATCHING_H */