Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5 / 5B / 5 Plus boards. The file shown below is arch/s390/kernel/uprobes.c, the s390 user-space probes (uprobes) implementation.

// SPDX-License-Identifier: GPL-2.0
/*
 *  User-space Probes (UProbes) for s390
 *
 *    Copyright IBM Corp. 2014
 *    Author(s): Jan Willeke,
 */

#include <linux/uaccess.h>
#include <linux/uprobes.h>
#include <linux/compat.h>
#include <linux/kdebug.h>
#include <linux/sched/task_stack.h>

#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/kprobes.h>
#include <asm/dis.h>
#include "entry.h"

#define	UPROBE_TRAP_NR	UINT_MAX

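/*
 * Architecture hooks for the generic uprobes code.  A breakpoint replaces
 * the probed instruction; the original instruction is copied to a slot in
 * the task's "execute out of line" (XOL) area and single-stepped there,
 * with the hooks below preparing, checking and fixing up that step.
 *
 * arch_uprobe_analyze_insn() rejects instructions that may not be probed,
 * using the common s390 probe helper probe_is_prohibited_opcode().
 */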
int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
			     unsigned long addr)
{
	return probe_is_prohibited_opcode(auprobe->insn);
}

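/*
 * Prepare single-stepping of the copied instruction in the XOL slot:
 * 24-bit addressing mode, and 31-bit mode outside compat tasks, are not
 * supported.  PER state and the interruption code are saved, int_code is
 * set to UPROBE_TRAP_NR so the step can be told apart from other traps,
 * the PSW is pointed at the XOL slot and the control registers are set up
 * for single-stepping.
 */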
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if (psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT)
		return -EINVAL;
	if (!is_compat_task() && psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT)
		return -EINVAL;
	clear_pt_regs_flag(regs, PIF_PER_TRAP);
	auprobe->saved_per = psw_bits(regs->psw).per;
	auprobe->saved_int_code = regs->int_code;
	regs->int_code = UPROBE_TRAP_NR;
	regs->psw.addr = current->utask->xol_vaddr;
	set_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
	update_cr_regs(current);
	return 0;
}

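/*
 * arch_uprobe_pre_xol() set int_code to UPROBE_TRAP_NR; any exception taken
 * during the step overwrites it, so any other value means the step trapped.
 */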
bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	if (regs->int_code != UPROBE_TRAP_NR)
		return true;
	return false;
}

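/*
 * Decide whether the completed single step should also be reported to the
 * tracer as a PER event.  'control' holds the user's PER control bits and
 * 'cause' the PER event bits shifted right by 16 (see sim_stor_event()).
 * The literals appear to combine PER_EVENT_STORE | PER_CONTROL_ALTERATION
 * (0x20200000) and PER_EVENT_BRANCH | PER_CONTROL_BRANCH_ADDRESS
 * (0x80800000) from <asm/ptrace.h>.
 */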
static int check_per_event(unsigned short cause, unsigned long control,
			   struct pt_regs *regs)
{
	if (!(regs->psw.mask & PSW_MASK_PER))
		return 0;
	/* user space single step */
	if (control == 0)
		return 1;
	/* over indication for storage alteration */
	if ((control & 0x20200000) && (cause & 0x2000))
		return 1;
	if (cause & 0x8000) {
		/* all branches */
		if ((control & 0x80800000) == 0x80000000)
			return 1;
		/* branch into selected range */
		if (((control & 0x80800000) == 0x80800000) &&
		    regs->psw.addr >= current->thread.per_user.start &&
		    regs->psw.addr <= current->thread.per_user.end)
			return 1;
	}
	return 0;
}

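/*
 * Fix up state after the instruction was stepped in the XOL slot: restore
 * PER and int_code, rebase the PSW (and, for branch-and-save style
 * instructions, the return register) from the XOL slot back to the original
 * address, handle a conditional branch that was not taken, and re-inject a
 * PER event if the tracer's settings call for one.
 */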
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	int fixup = probe_get_fixup_type(auprobe->insn);
	struct uprobe_task *utask = current->utask;

	clear_tsk_thread_flag(current, TIF_UPROBE_SINGLESTEP);
	update_cr_regs(current);
	psw_bits(regs->psw).per = auprobe->saved_per;
	regs->int_code = auprobe->saved_int_code;

	if (fixup & FIXUP_PSW_NORMAL)
		regs->psw.addr += utask->vaddr - utask->xol_vaddr;
	if (fixup & FIXUP_RETURN_REGISTER) {
		int reg = (auprobe->insn[0] & 0xf0) >> 4;

		regs->gprs[reg] += utask->vaddr - utask->xol_vaddr;
	}
	if (fixup & FIXUP_BRANCH_NOT_TAKEN) {
		int ilen = insn_length(auprobe->insn[0] >> 8);

		if (regs->psw.addr - utask->xol_vaddr == ilen)
			regs->psw.addr = utask->vaddr + ilen;
	}
	if (check_per_event(current->thread.per_event.cause,
			    current->thread.per_user.control, regs)) {
		/* fix per address */
		current->thread.per_event.address = utask->vaddr;
		/* trigger per event */
		set_pt_regs_flag(regs, PIF_PER_TRAP);
	}
	return 0;
}

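/*
 * Notifier on the s390 die chain: forward user space breakpoint (DIE_BPT)
 * and single-step (DIE_SSTEP) exceptions to the generic uprobes code.
 * Traps raised inside a transaction are left to the regular trap handlers.
 */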
int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
				 void *data)
{
	struct die_args *args = data;
	struct pt_regs *regs = args->regs;

	if (!user_mode(regs))
		return NOTIFY_DONE;
	if (regs->int_code & 0x200) /* Trap during transaction */
		return NOTIFY_DONE;
	switch (val) {
	case DIE_BPT:
		if (uprobe_pre_sstep_notifier(regs))
			return NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (uprobe_post_sstep_notifier(regs))
			return NOTIFY_STOP;
	default:
		break;
	}
	return NOTIFY_DONE;
}

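/*
 * Abandon a single step (the generic code calls this e.g. when a fatal
 * signal is pending): restore int_code and point the PSW and the PER
 * address back at the original instruction.
 */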
void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	clear_thread_flag(TIF_UPROBE_SINGLESTEP);
	regs->int_code = auprobe->saved_int_code;
	regs->psw.addr = current->utask->vaddr;
	current->thread.per_event.address = current->utask->vaddr;
}

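/*
 * For uretprobes: r14 holds the return address in the s390 calling
 * convention, so swap it with the trampoline address and hand the original
 * return address back to the generic code.
 */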
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
						struct pt_regs *regs)
{
	unsigned long orig;

	orig = regs->gprs[14];
	regs->gprs[14] = trampoline;
	return orig;
}

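/*
 * ret->stack is the user stack pointer recorded when the return address was
 * hijacked.  The stack grows downwards, so a current stack pointer above
 * that value means the corresponding frame has already returned.
 */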
bool arch_uretprobe_is_alive(struct return_instance *ret, enum rp_check ctx,
			     struct pt_regs *regs)
{
	if (ctx == RP_CHECK_CHAIN_CALL)
		return user_stack_pointer(regs) <= ret->stack;
	else
		return user_stack_pointer(regs) < ret->stack;
}

/* Instruction Emulation */

static void adjust_psw_addr(psw_t *psw, unsigned long len)
{
	psw->addr = __rewind_psw(*psw, -len);
}

#define EMU_ILLEGAL_OP		1
#define EMU_SPECIFICATION	2
#define EMU_ADDRESSING		3

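/*
 * Helpers for emulating RIL-format loads, stores and compares.  Each one
 * checks that facility 34 (the general-instructions-extension facility) is
 * installed, that the operand address is aligned on its natural boundary as
 * the architecture requires, and then accesses user memory with
 * get_user()/put_user().  Failures map to the EMU_* codes above and are
 * turned into the matching program interruption by handle_insn_ril().
 * Emulated stores additionally go through sim_stor_event() so that PER
 * storage-alteration tracing keeps working.
 */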
#define emu_load_ril(ptr, output)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(*(ptr)) input;			\
	int __rc = 0;					\
							\
	if (!test_facility(34))				\
		__rc = EMU_ILLEGAL_OP;			\
	else if ((u64 __force)ptr & mask)		\
		__rc = EMU_SPECIFICATION;		\
	else if (get_user(input, ptr))			\
		__rc = EMU_ADDRESSING;			\
	else						\
		*(output) = input;			\
	__rc;						\
})

#define emu_store_ril(regs, ptr, input)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(ptr) __ptr = (ptr);			\
	int __rc = 0;					\
							\
	if (!test_facility(34))				\
		__rc = EMU_ILLEGAL_OP;			\
	else if ((u64 __force)__ptr & mask)		\
		__rc = EMU_SPECIFICATION;		\
	else if (put_user(*(input), __ptr))		\
		__rc = EMU_ADDRESSING;			\
	if (__rc == 0)					\
		sim_stor_event(regs,			\
			       (void __force *)__ptr,	\
			       mask + 1);		\
	__rc;						\
})

#define emu_cmp_ril(regs, ptr, cmp)			\
({							\
	unsigned int mask = sizeof(*(ptr)) - 1;		\
	__typeof__(*(ptr)) input;			\
	int __rc = 0;					\
							\
	if (!test_facility(34))				\
		__rc = EMU_ILLEGAL_OP;			\
	else if ((u64 __force)ptr & mask)		\
		__rc = EMU_SPECIFICATION;		\
	else if (get_user(input, ptr))			\
		__rc = EMU_ADDRESSING;			\
	else if (input > *(cmp))			\
		psw_bits((regs)->psw).cc = 1;		\
	else if (input < *(cmp))			\
		psw_bits((regs)->psw).cc = 2;		\
	else						\
		psw_bits((regs)->psw).cc = 0;		\
	__rc;						\
})

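/*
 * RIL instruction format: 8-bit opcode, 4-bit register field, 4-bit opcode
 * extension and a signed 32-bit immediate which, for the relative-long
 * instructions handled here, is a displacement in halfwords relative to the
 * instruction address.
 */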
struct insn_ril {
	u8 opc0;
	u8 reg	: 4;
	u8 opc1 : 4;
	s32 disp;
} __packed;

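/*
 * View of a 64-bit general purpose register that also exposes its 32- and
 * 16-bit parts; s390 is big-endian, so u32[1] and u16[3] are the low word
 * and low halfword that instructions like lrl or sthrl operate on.
 */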
union split_register {
	u64 u64;
	u32 u32[2];
	u16 u16[4];
	s64 s64;
	s32 s32[2];
	s16 s16[4];
};

/*
 * If the user PER control registers are set up to trace storage
 * alterations and an emulated store hit a matching address, generate the
 * PER trap the real instruction would have caused.
 */
static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
{
	if (!(regs->psw.mask & PSW_MASK_PER))
		return;
	if (!(current->thread.per_user.control & PER_EVENT_STORE))
		return;
	if ((void *)current->thread.per_user.start > (addr + len))
		return;
	if ((void *)current->thread.per_user.end < addr)
		return;
	current->thread.per_event.address = regs->psw.addr;
	current->thread.per_event.cause = PER_EVENT_STORE >> 16;
	set_pt_regs_flag(regs, PIF_PER_TRAP);
}

/*
 * PC-relative (relative-long) instructions are emulated rather than
 * single-stepped out of line, since their operands may not be reachable
 * from the XOL area because of the limited displacement range.  The
 * displacement is counted in halfwords, hence the multiplication by two
 * when forming the operand address.
 */
static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	union split_register *rx;
	struct insn_ril *insn;
	unsigned int ilen;
	void *uptr;
	int rc = 0;

	insn = (struct insn_ril *) &auprobe->insn;
	rx = (union split_register *) &regs->gprs[insn->reg];
	uptr = (void *)(regs->psw.addr + (insn->disp * 2));
	ilen = insn_length(insn->opc0);

	switch (insn->opc0) {
	case 0xc0:
		switch (insn->opc1) {
		case 0x00: /* larl */
			rx->u64 = (unsigned long)uptr;
			break;
		}
		break;
	case 0xc4:
		switch (insn->opc1) {
		case 0x02: /* llhrl */
			rc = emu_load_ril((u16 __user *)uptr, &rx->u32[1]);
			break;
		case 0x04: /* lghrl */
			rc = emu_load_ril((s16 __user *)uptr, &rx->u64);
			break;
		case 0x05: /* lhrl */
			rc = emu_load_ril((s16 __user *)uptr, &rx->u32[1]);
			break;
		case 0x06: /* llghrl */
			rc = emu_load_ril((u16 __user *)uptr, &rx->u64);
			break;
		case 0x08: /* lgrl */
			rc = emu_load_ril((u64 __user *)uptr, &rx->u64);
			break;
		case 0x0c: /* lgfrl */
			rc = emu_load_ril((s32 __user *)uptr, &rx->u64);
			break;
		case 0x0d: /* lrl */
			rc = emu_load_ril((u32 __user *)uptr, &rx->u32[1]);
			break;
		case 0x0e: /* llgfrl */
			rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
			break;
		case 0x07: /* sthrl */
			rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
			break;
		case 0x0b: /* stgrl */
			rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
			break;
		case 0x0f: /* strl */
			rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
			break;
		}
		break;
	case 0xc6:
		switch (insn->opc1) {
		case 0x02: /* pfdrl */
			if (!test_facility(34))
				rc = EMU_ILLEGAL_OP;
			break;
		case 0x04: /* cghrl */
			rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s64);
			break;
		case 0x05: /* chrl */
			rc = emu_cmp_ril(regs, (s16 __user *)uptr, &rx->s32[1]);
			break;
		case 0x06: /* clghrl */
			rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u64);
			break;
		case 0x07: /* clhrl */
			rc = emu_cmp_ril(regs, (u16 __user *)uptr, &rx->u32[1]);
			break;
		case 0x08: /* cgrl */
			rc = emu_cmp_ril(regs, (s64 __user *)uptr, &rx->s64);
			break;
		case 0x0a: /* clgrl */
			rc = emu_cmp_ril(regs, (u64 __user *)uptr, &rx->u64);
			break;
		case 0x0c: /* cgfrl */
			rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s64);
			break;
		case 0x0d: /* crl */
			rc = emu_cmp_ril(regs, (s32 __user *)uptr, &rx->s32[1]);
			break;
		case 0x0e: /* clgfrl */
			rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u64);
			break;
		case 0x0f: /* clrl */
			rc = emu_cmp_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
			break;
		}
		break;
	}
	adjust_psw_addr(&regs->psw, ilen);
	switch (rc) {
	case EMU_ILLEGAL_OP:
		regs->int_code = ilen << 16 | 0x0001;
		do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
		break;
	case EMU_SPECIFICATION:
		regs->int_code = ilen << 16 | 0x0006;
		do_report_trap(regs, SIGILL, ILL_ILLOPC, NULL);
		break;
	case EMU_ADDRESSING:
		regs->int_code = ilen << 16 | 0x0005;
		do_report_trap(regs, SIGSEGV, SEGV_MAPERR, NULL);
		break;
	}
}

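/*
 * Called when the uprobe breakpoint is hit.  In 24-bit mode (or 31-bit mode
 * outside compat) probing is not supported: rewind the PSW over the
 * breakpoint and report SIGILL.  Relative-long (PC-relative) instructions
 * are emulated in place by handle_insn_ril(); returning true tells the
 * generic code that no XOL single step is needed.
 */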
bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
	if ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_24BIT) ||
	    ((psw_bits(regs->psw).eaba == PSW_BITS_AMODE_31BIT) &&
	     !is_compat_task())) {
		regs->psw.addr = __rewind_psw(regs->psw, UPROBE_SWBP_INSN_SIZE);
		do_report_trap(regs, SIGILL, ILL_ILLADR, NULL);
		return true;
	}
	if (probe_is_insn_relative_long(auprobe->insn)) {
		handle_insn_ril(auprobe, regs);
		return true;
	}
	return false;
}