Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2004 Paul Mackerras <paulus@au.ibm.com>, IBM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) #include <asm/inst.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) struct pt_regs;	/* opaque here; this header uses it only via pointers */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  * We don't allow single-stepping an mtmsrd that would clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * MSR_RI, since that would make the exception unrecoverable.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * Since we need to single-step to proceed from a breakpoint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * we don't allow putting a breakpoint on an mtmsrd instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * Similarly we don't allow breakpoints on rfid instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * These macros tell us if an instruction is a mtmsrd or rfid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  * Note that IS_MTMSRD returns true for both an mtmsr (32-bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17)  * and an mtmsrd (64-bit).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #define IS_MTMSRD(instr)	((ppc_inst_val(instr) & 0xfc0007be) == 0x7c000124)	/* mtmsr or mtmsrd (mask leaves the bit that distinguishes them out) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #define IS_RFID(instr)		((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000024)	/* rfid */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #define IS_RFI(instr)		((ppc_inst_val(instr) & 0xfc0007fe) == 0x4c000064)	/* rfi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) enum instruction_type {	/* decoded type, stored in the low bits of instruction_op.type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) 	COMPUTE,		/* arith/logical/CR op, etc. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	LOAD,			/* load and store types need to be contiguous */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	LOAD_MULTI,		/* multi-register load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	LOAD_FP,		/* floating-point load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 	LOAD_VMX,		/* VMX (Altivec) load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	LOAD_VSX,		/* VSX load */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 	STORE,			/* store types mirror the load types above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 	STORE_MULTI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 	STORE_FP,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	STORE_VMX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) 	STORE_VSX,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 	LARX,			/* load and reserve */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) 	STCX,			/* store conditional; must stay last of the load/store range (OP_IS_LOAD_STORE) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 	BRANCH,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	MFSPR,			/* move from special-purpose register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 	MTSPR,			/* move to special-purpose register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	CACHEOP,		/* cache management op; subtype in CACHEOP_MASK bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 	BARRIER,		/* memory barrier; subtype in BARRIER_MASK bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	SYSCALL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 	SYSCALL_VECTORED_0,	/* vectored system call, level 0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	MFMSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 	MTMSR,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	RFI,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	INTERRUPT,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	UNKNOWN			/* could not be decoded; must fit INSTR_TYPE_MASK with all entries above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) #define INSTR_TYPE_MASK	0x1f	/* enum instruction_type lives in the low 5 bits of the type word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) #define OP_IS_LOAD(type)	((LOAD <= (type) && (type) <= LOAD_VSX) || (type) == LARX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) #define OP_IS_STORE(type)	((STORE <= (type) && (type) <= STORE_VSX) || (type) == STCX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) #define OP_IS_LOAD_STORE(type)	(LOAD <= (type) && (type) <= STCX)	/* relies on LOAD..STCX being contiguous in the enum */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) /* Compute flags, ORed in with type.  NB: bits 0x20-0x80 are reused with different meanings by the branch and load/store flag sets below. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  58) #define SETREG		0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  59) #define SETCC		0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  60) #define SETXER		0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  61) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  62) /* Branch flags, ORed in with type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  63) #define SETLK		0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  64) #define BRTAKEN		0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  65) #define DECCTR		0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  66) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  67) /* Load/store flags, ORed in with type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  68) #define SIGNEXT		0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  69) #define UPDATE		0x40	/* matches bit in opcode 31 instructions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  70) #define BYTEREV		0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  71) #define FPCONV		0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  73) /* Barrier type field, ORed in with type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  74) #define BARRIER_MASK	0xe0	/* barrier subtype occupies bits 5-7 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  75) #define BARRIER_SYNC	0x00
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  76) #define BARRIER_ISYNC	0x20
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  77) #define BARRIER_EIEIO	0x40
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  78) #define BARRIER_LWSYNC	0x60
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  79) #define BARRIER_PTESYNC	0x80
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  80) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  81) /* Cacheop values, ORed in with type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  82) #define CACHEOP_MASK	0x700	/* cacheop subtype occupies bits 8-10 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  83) #define DCBST		0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  84) #define DCBF		0x100
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  85) #define DCBTST		0x200
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) #define DCBT		0x300
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) #define ICBI		0x400
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) #define DCBZ		0x500
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) /* VSX flags values */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) #define VSX_FPCONV	1	/* do floating point SP/DP conversion */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) #define VSX_SPLAT	2	/* store loaded value into all elements */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) #define VSX_LDLEFT	4	/* load VSX register from left */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) #define VSX_CHECK_VEC	8	/* check MSR_VEC not MSR_VSX for reg >= 32 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) /* Prefixed flag, ORed in with type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) #define PREFIXED       0x800
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  98) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  99) /* Size field in type word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) #define SIZE(n)		((n) << 12)	/* operand size stored in bits 12 and up */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) #define GETSIZE(w)	((w) >> 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) #define GETTYPE(t)	((t) & INSTR_TYPE_MASK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #define GETLENGTH(t)   (((t) & PREFIXED) ? 8 : 4)	/* instruction length in bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) #define MKOP(t, f, s)	((t) | (f) | SIZE(s))	/* compose a type word from type, flags and size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /* Prefix instruction operands */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) #define GET_PREFIX_RA(i)	(((i) >> 16) & 0x1f)	/* extracts bits 16-20 of the given word */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #define GET_PREFIX_R(i)		((i) & (1ul << 20))	/* R bit (bit 20) - presumably tested on the prefix word; confirm against ISA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) extern s32 patch__exec_instr;	/* code-patching site label - presumably defined in assembly; TODO confirm */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) struct instruction_op {	/* decoded-instruction descriptor filled in by analyse_instr() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 	int type;	/* type | flags | size, as composed by MKOP() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 	int reg;	/* register operand number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) 	unsigned long val;	/* value operand; meaning depends on type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) 	/* For LOAD/STORE/LARX/STCX */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 	unsigned long ea;	/* effective address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	int update_reg;	/* register written back for update-form accesses (UPDATE flag) - presumably */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 	/* For MFSPR */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) 	int spr;	/* special-purpose register number */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) 	u32 ccval;	/* CR value when SETCC is set - presumably */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) 	u32 xerval;	/* XER value when SETXER is set - presumably */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) 	u8 element_size;	/* for VSX/VMX loads/stores */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) 	u8 vsx_flags;	/* VSX_* flags defined above */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) union vsx_reg {	/* 128-bit vector register image, viewable at several element widths */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) 	u8	b[16];	/* bytes */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) 	u16	h[8];	/* halfwords */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) 	u32	w[4];	/* words */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) 	unsigned long d[2];	/* doublewords on 64-bit builds (unsigned long is narrower on ppc32) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 	float	fp[4];	/* single-precision view */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) 	double	dp[2];	/* double-precision view */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) 	__vector128 v;	/* native vector type */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  * Decode an instruction, and return information about it in *op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  * without changing *regs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)  * Return value is 1 if the instruction can be emulated just by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)  * updating *regs with the information in *op, -1 if we need the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)  * GPRs but *regs doesn't contain the full register set, or 0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)  * otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) extern int analyse_instr(struct instruction_op *op, const struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 			 struct ppc_inst instr);	/* instr: full (possibly 8-byte prefixed) instruction image - presumably; cf. PREFIXED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152)  * Emulate an instruction that can be executed just by updating fields
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153)  * in *regs, using *op as filled in by a 1-returning analyse_instr().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) void emulate_update_regs(struct pt_regs *regs, struct instruction_op *op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)  * Emulate instructions that cause a transfer of control,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159)  * arithmetic/logical instructions, loads and stores,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)  * cache operations and barriers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)  * Returns 1 if the instruction was emulated successfully,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)  * 0 if it could not be emulated, or -1 for an instruction that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)  * should not be emulated (rfid, mtmsrd clearing MSR_RI, etc.).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) extern int emulate_step(struct pt_regs *regs, struct ppc_inst instr);	/* on success, *regs reflects the executed instruction */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169)  * Emulate a load or store instruction by reading/writing the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170)  * memory of the current process.  FP/VMX/VSX registers are assumed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  * to hold live values if the appropriate enable bit in regs->msr is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  * set; otherwise this will use the saved values in the thread struct
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  * for user-mode accesses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) extern int emulate_loadstore(struct pt_regs *regs, struct instruction_op *op);	/* NOTE(review): return convention not documented here - presumably 0 on success; confirm in sstep.c */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) extern void emulate_vsx_load(struct instruction_op *op, union vsx_reg *reg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 			     const void *mem, bool cross_endian);	/* fill *reg from memory image *mem per op->element_size / op->vsx_flags; cross_endian swaps byte order relative to host - presumably */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) extern void emulate_vsx_store(struct instruction_op *op,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 			      const union vsx_reg *reg, void *mem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 			      bool cross_endian);	/* inverse of emulate_vsx_load: write *reg out to the memory image at mem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) extern int emulate_dcbz(unsigned long ea, struct pt_regs *regs);	/* dcbz: zero the cache block containing ea - presumably; cf. DCBZ above */