Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * 'traps.c' handles hardware traps and faults after we have saved some
 * state in 'entry.S'.
 *
 *  SuperH version: Copyright (C) 1999 Niibe Yutaka
 *                  Copyright (C) 2000 Philipp Rumpf
 *                  Copyright (C) 2000 David Howells
 *                  Copyright (C) 2002 - 2010 Paul Mundt
 */
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/debug_locks.h>
#include <linux/kdebug.h>
#include <linux/limits.h>
#include <linux/sysfs.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/sched/task_stack.h>

#include <asm/alignment.h>
#include <asm/fpu.h>
#include <asm/kprobes.h>
#include <asm/traps.h>
#include <asm/bl_bit.h>

#ifdef CONFIG_CPU_SH2
# define TRAP_RESERVED_INST	4
# define TRAP_ILLEGAL_SLOT_INST	6
# define TRAP_ADDRESS_ERROR	9
# ifdef CONFIG_CPU_SH2A
#  define TRAP_UBC		12
#  define TRAP_FPU_ERROR	13
#  define TRAP_DIVZERO_ERROR	17
#  define TRAP_DIVOVF_ERROR	18
# endif
#else
#define TRAP_RESERVED_INST	12
#define TRAP_ILLEGAL_SLOT_INST	13
#endif

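/*
 * Helper for the load fixups below: the emulated load writes a 1-, 2-
 * or 4-byte value into a zeroed 4-byte register image, and sign_extend()
 * then propagates the sign bit of a byte or word load into the upper
 * bytes (taking the endian-dependent byte layout into account), so the
 * register ends up holding the same value a native mov.b/mov.w would
 * have produced.
 */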
static inline void sign_extend(unsigned int count, unsigned char *dst)
{
#ifdef __LITTLE_ENDIAN__
	if ((count == 1) && dst[0] & 0x80) {
		dst[1] = 0xff;
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
	if ((count == 2) && dst[1] & 0x80) {
		dst[2] = 0xff;
		dst[3] = 0xff;
	}
#else
	if ((count == 1) && dst[3] & 0x80) {
		dst[2] = 0xff;
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
	if ((count == 2) && dst[2] & 0x80) {
		dst[1] = 0xff;
		dst[0] = 0xff;
	}
#endif
}

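/*
 * Memory accessors used when fixing up a fault taken in user context:
 * all emulated loads and stores go through copy_from_user()/copy_to_user()
 * so user pointers are validated and faults are handled gracefully.
 */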
static struct mem_access user_mem_access = {
	copy_from_user,
	copy_to_user,
};

/*
 * handle an instruction that does an unaligned memory access by emulating the
 * desired behaviour
 * - note that PC _may not_ point to the faulting instruction
 *   (if that instruction is in a branch delay slot)
 * - return 0 if emulation okay, -EFAULT on existential error
 */
static int handle_unaligned_ins(insn_size_t instruction, struct pt_regs *regs,
				struct mem_access *ma)
{
	int ret, index, count;
	unsigned long *rm, *rn;
	unsigned char *src, *dst;
	unsigned char __user *srcu, *dstu;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rn = &regs->regs[index];

	index = (instruction>>4)&15;	/* 0x00F0 */
	rm = &regs->regs[index];

	count = 1<<(instruction&3);

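	/*
	 * The low two bits of the opcode give the access size
	 * (1 << 0..3 = 1, 2, 4 or 8 bytes); bump the matching
	 * unaligned-access statistics counter before emulating.
	 */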
	switch (count) {
	case 1: inc_unaligned_byte_access(); break;
	case 2: inc_unaligned_word_access(); break;
	case 4: inc_unaligned_dword_access(); break;
	case 8: inc_unaligned_multi_access(); break;
	}

	ret = -EFAULT;
	switch (instruction>>12) {
	case 0: /* mov.[bwl] to/from memory via r0+rn */
		if (instruction & 8) {
			/* from memory */
			srcu = (unsigned char __user *)*rm;
			srcu += regs->regs[0];
			dst = (unsigned char *)rn;
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 4-count;
#endif
			if (ma->from(dst, srcu, count))
				goto fetch_fault;

			sign_extend(count, dst);
		} else {
			/* to memory */
			src = (unsigned char *)rm;
#if !defined(__LITTLE_ENDIAN__)
			src += 4-count;
#endif
			dstu = (unsigned char __user *)*rn;
			dstu += regs->regs[0];

			if (ma->to(dstu, src, count))
				goto fetch_fault;
		}
		ret = 0;
		break;

	case 1: /* mov.l Rm,@(disp,Rn) */
		src = (unsigned char*) rm;
		dstu = (unsigned char __user *)*rn;
		dstu += (instruction&0x000F)<<2;

		if (ma->to(dstu, src, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 2: /* mov.[bwl] to memory, possibly with pre-decrement */
		if (instruction & 4)
			*rn -= count;
		src = (unsigned char*) rm;
		dstu = (unsigned char __user *)*rn;
#if !defined(__LITTLE_ENDIAN__)
		src += 4-count;
#endif
		if (ma->to(dstu, src, count))
			goto fetch_fault;
		ret = 0;
		break;

	case 5: /* mov.l @(disp,Rm),Rn */
		srcu = (unsigned char __user *)*rm;
		srcu += (instruction & 0x000F) << 2;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

		if (ma->from(dst, srcu, 4))
			goto fetch_fault;
		ret = 0;
		break;

	case 6:	/* mov.[bwl] from memory, possibly with post-increment */
		srcu = (unsigned char __user *)*rm;
		if (instruction & 4)
			*rm += count;
		dst = (unsigned char*) rn;
		*(unsigned long*)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
		dst += 4-count;
#endif
		if (ma->from(dst, srcu, count))
			goto fetch_fault;
		sign_extend(count, dst);
		ret = 0;
		break;

	case 8:
		switch ((instruction&0xFF00)>>8) {
		case 0x81: /* mov.w R0,@(disp,Rn) */
			src = (unsigned char *) &regs->regs[0];
#if !defined(__LITTLE_ENDIAN__)
			src += 2;
#endif
			dstu = (unsigned char __user *)*rm; /* called Rn in the spec */
			dstu += (instruction & 0x000F) << 1;

			if (ma->to(dstu, src, 2))
				goto fetch_fault;
			ret = 0;
			break;

		case 0x85: /* mov.w @(disp,Rm),R0 */
			srcu = (unsigned char __user *)*rm;
			srcu += (instruction & 0x000F) << 1;
			dst = (unsigned char *) &regs->regs[0];
			*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
			dst += 2;
#endif
			if (ma->from(dst, srcu, 2))
				goto fetch_fault;
			sign_extend(2, dst);
			ret = 0;
			break;
		}
		break;

	case 9: /* mov.w @(disp,PC),Rn */
		srcu = (unsigned char __user *)regs->pc;
		srcu += 4;
		srcu += (instruction & 0x00FF) << 1;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

#if !defined(__LITTLE_ENDIAN__)
		dst += 2;
#endif

		if (ma->from(dst, srcu, 2))
			goto fetch_fault;
		sign_extend(2, dst);
		ret = 0;
		break;

	case 0xd: /* mov.l @(disp,PC),Rn */
		srcu = (unsigned char __user *)(regs->pc & ~0x3);
		srcu += 4;
		srcu += (instruction & 0x00FF) << 2;
		dst = (unsigned char *)rn;
		*(unsigned long *)dst = 0;

		if (ma->from(dst, srcu, 4))
			goto fetch_fault;
		ret = 0;
		break;
	}
	return ret;

 fetch_fault:
	/* Argh. Address not only misaligned but also non-existent.
	 * Raise an EFAULT and see if it's trapped
	 */
	die_if_no_fixup("Fault in unaligned fixup", regs, 0);
	return -EFAULT;
}

/*
 * emulate the instruction in the delay slot
 * - fetches the instruction from PC+2
 */
static inline int handle_delayslot(struct pt_regs *regs,
				   insn_size_t old_instruction,
				   struct mem_access *ma)
{
	insn_size_t instruction;
	void __user *addr = (void __user *)(regs->pc +
		instruction_size(old_instruction));

	if (copy_from_user(&instruction, addr, sizeof(instruction))) {
		/* the instruction-fetch faulted */
		if (user_mode(regs))
			return -EFAULT;

		/* kernel */
		die("delay-slot-insn faulting in handle_unaligned_delayslot",
		    regs, 0);
	}

	return handle_unaligned_ins(instruction, regs, ma);
}

/*
 * handle an instruction that does an unaligned memory access
 * - have to be careful of branch delay-slot instructions that fault
 *  SH3:
 *   - if the branch would be taken PC points to the branch
 *   - if the branch would not be taken, PC points to delay-slot
 *  SH4:
 *   - PC always points to delayed branch
 * - return 0 if handled, -EFAULT if failed (may not return if in kernel)
 */

/* Macros to determine offset from current PC for branch instructions */
/* Explicit type coercion is used to force sign extension where needed */
#define SH_PC_8BIT_OFFSET(instr) ((((signed char)(instr))*2) + 4)
#define SH_PC_12BIT_OFFSET(instr) ((((signed short)(instr<<4))>>3) + 4)
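/*
 * Worked example: "bra" with opcode 0xA800 encodes the 12-bit
 * displacement 0x800, i.e. -2048.  (0xA800 << 4) truncated to a signed
 * short is 0x8000 = -32768; the arithmetic shift right by 3 yields
 * -4096, which is the sign-extended displacement already multiplied by
 * 2, and the final +4 gives the PC-relative offset of -4092.
 */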

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int expected,
			    unsigned long address)
{
	u_int rm;
	int ret, index;

	/*
	 * XXX: We can't handle mixed 16/32-bit instructions yet
	 */
	if (instruction_size(instruction) != 2)
		return -EINVAL;

	index = (instruction>>8)&15;	/* 0x0F00 */
	rm = regs->regs[index];

	/*
	 * Log the unexpected fixups, and then pass them on to perf.
	 *
	 * We intentionally don't report the expected cases to perf as
	 * otherwise the trapped I/O case will skew the results too much
	 * to be useful.
	 */
	if (!expected) {
		unaligned_fixups_notify(current, instruction, regs);
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1,
			      regs, address);
	}

	ret = -EFAULT;
	switch (instruction&0xF000) {
	case 0x0000:
		if (instruction==0x000B) {
			/* rts */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc = regs->pr;
		}
		else if ((instruction&0x00FF)==0x0023) {
			/* braf @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc += rm + 4;
		}
		else if ((instruction&0x00FF)==0x0003) {
			/* bsrf @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc += rm + 4;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x1000: /* mov.l Rm,@(disp,Rn) */
		goto simple;

	case 0x2000: /* mov.[bwl] to memory, possibly with pre-decrement */
		goto simple;

	case 0x4000:
		if ((instruction&0x00FF)==0x002B) {
			/* jmp @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0)
				regs->pc = rm;
		}
		else if ((instruction&0x00FF)==0x000B) {
			/* jsr @Rm */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
				regs->pr = regs->pc + 4;
				regs->pc = rm;
			}
		}
		else {
			/* mov.[bwl] to/from memory via r0+rn */
			goto simple;
		}
		break;

	case 0x5000: /* mov.l @(disp,Rm),Rn */
		goto simple;

	case 0x6000: /* mov.[bwl] from memory, possibly with post-increment */
		goto simple;

	case 0x8000: /* bf lab, bf/s lab, bt lab, bt/s lab */
		switch (instruction&0x0F00) {
		case 0x0100: /* mov.w R0,@(disp,Rm) */
			goto simple;
		case 0x0500: /* mov.w @(disp,Rm),R0 */
			goto simple;
		case 0x0B00: /* bf   lab - no delayslot*/
			ret = 0;
			break;
		case 0x0F00: /* bf/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) != 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		case 0x0900: /* bt   lab - no delayslot */
			ret = 0;
			break;
		case 0x0D00: /* bt/s lab */
			ret = handle_delayslot(regs, instruction, ma);
			if (ret==0) {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
				if ((regs->sr & 0x00000001) == 0)
					regs->pc += 4; /* next after slot */
				else
#endif
					regs->pc += SH_PC_8BIT_OFFSET(instruction);
			}
			break;
		}
		break;

	case 0x9000: /* mov.w @(disp,Rm),Rn */
		goto simple;

	case 0xA000: /* bra label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret==0)
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		break;

	case 0xB000: /* bsr label */
		ret = handle_delayslot(regs, instruction, ma);
		if (ret==0) {
			regs->pr = regs->pc + 4;
			regs->pc += SH_PC_12BIT_OFFSET(instruction);
		}
		break;

	case 0xD000: /* mov.l @(disp,Rm),Rn */
		goto simple;
	}
	return ret;

	/* handle non-delay-slot instruction */
 simple:
	ret = handle_unaligned_ins(instruction, regs, ma);
	if (ret==0)
		regs->pc += instruction_size(instruction);
	return ret;
}

/*
 * Handle various address error exceptions:
 *  - instruction address error:
 *       misaligned PC
 *       PC >= 0x80000000 in user mode
 *  - data address error (read and write)
 *       misaligned data access
 *       access to >= 0x80000000 in user mode
 * Unfortunately we can't distinguish between instruction address errors
 * and data address errors caused by read accesses.
 */
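/*
 * In user mode the handler reads the faulting opcode and, depending on
 * the configured unaligned-access action, either emulates the access
 * (UM_FIXUP), delivers SIGBUS (UM_SIGNAL), or silently skips over the
 * instruction.  In kernel mode it always attempts the fixup and dies if
 * the program counter itself is misaligned.
 */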
asmlinkage void do_address_error(struct pt_regs *regs,
				 unsigned long writeaccess,
				 unsigned long address)
{
	unsigned long error_code = 0;
	mm_segment_t oldfs;
	insn_size_t instruction;
	int tmp;

	/* Intentional ifdef */
#ifdef CONFIG_CPU_HAS_SR_RB
	error_code = lookup_exception_vector();
#endif

	if (user_mode(regs)) {
		int si_code = BUS_ADRERR;
		unsigned int user_action;

		local_irq_enable();
		inc_unaligned_user_access();

		oldfs = force_uaccess_begin();
		if (copy_from_user(&instruction, (insn_size_t *)(regs->pc & ~1),
				   sizeof(instruction))) {
			force_uaccess_end(oldfs);
			goto uspace_segv;
		}
		force_uaccess_end(oldfs);

		/* shout about userspace fixups */
		unaligned_fixups_notify(current, instruction, regs);

		user_action = unaligned_user_action();
		if (user_action & UM_FIXUP)
			goto fixup;
		if (user_action & UM_SIGNAL)
			goto uspace_segv;
		else {
			/* ignore */
			regs->pc += instruction_size(instruction);
			return;
		}

fixup:
		/* bad PC is not something we can fix */
		if (regs->pc & 1) {
			si_code = BUS_ADRALN;
			goto uspace_segv;
		}

		oldfs = force_uaccess_begin();
		tmp = handle_unaligned_access(instruction, regs,
					      &user_mem_access, 0,
					      address);
		force_uaccess_end(oldfs);

		if (tmp == 0)
			return; /* sorted */
uspace_segv:
		printk(KERN_NOTICE "Sending SIGBUS to \"%s\" due to unaligned "
		       "access (PC %lx PR %lx)\n", current->comm, regs->pc,
		       regs->pr);

		force_sig_fault(SIGBUS, si_code, (void __user *)address);
	} else {
		inc_unaligned_kernel_access();

		if (regs->pc & 1)
			die("unaligned program counter", regs, error_code);

		set_fs(KERNEL_DS);
		if (copy_from_user(&instruction, (void __user *)(regs->pc),
				   sizeof(instruction))) {
			/* Argh. Fault on the instruction itself.
			   This should never happen on non-SMP systems.
			*/
			set_fs(oldfs);
			die("insn faulting in do_address_error", regs, 0);
		}

		unaligned_fixups_notify(current, instruction, regs);

		handle_unaligned_access(instruction, regs, &user_mem_access,
					0, address);
		set_fs(oldfs);
	}
}

#ifdef CONFIG_SH_DSP
/*
 *	SH-DSP support gerg@snapgear.com.
 */
int is_dsp_inst(struct pt_regs *regs)
{
	unsigned short inst = 0;

	/*
	 * Safe guard if DSP mode is already enabled or we're lacking
	 * the DSP altogether.
	 */
	if (!(current_cpu_data.flags & CPU_HAS_DSP) || (regs->sr & SR_DSP))
		return 0;

	get_user(inst, ((unsigned short *) regs->pc));

	inst &= 0xf000;

	/* Check for any type of DSP or support instruction */
	if ((inst == 0xf000) || (inst == 0x4000))
		return 1;

	return 0;
}
#else
#define is_dsp_inst(regs)	(0)
#endif /* CONFIG_SH_DSP */

#ifdef CONFIG_CPU_SH2A
asmlinkage void do_divide_error(unsigned long r4)
{
	int code;

	switch (r4) {
	case TRAP_DIVZERO_ERROR:
		code = FPE_INTDIV;
		break;
	case TRAP_DIVOVF_ERROR:
		code = FPE_INTOVF;
		break;
	default:
		/* Let gcc know unhandled cases don't make it past here */
		return;
	}
	force_sig_fault(SIGFPE, code, NULL);
}
#endif

asmlinkage void do_reserved_inst(void)
{
	struct pt_regs *regs = current_pt_regs();
	unsigned long error_code;

#ifdef CONFIG_SH_FPU_EMU
	unsigned short inst = 0;
	int err;

	get_user(inst, (unsigned short*)regs->pc);

	err = do_fpu_inst(inst, regs);
	if (!err) {
		regs->pc += instruction_size(inst);
		return;
	}
	/* not a FPU inst. */
#endif

#ifdef CONFIG_SH_DSP
	/* Check if it's a DSP instruction */
	if (is_dsp_inst(regs)) {
		/* Enable DSP mode, and restart instruction. */
		regs->sr |= SR_DSP;
		/* Save DSP mode */
		current->thread.dsp_status.status |= SR_DSP;
		return;
	}
#endif

	error_code = lookup_exception_vector();

	local_irq_enable();
	force_sig(SIGILL);
	die_if_no_fixup("reserved instruction", regs, error_code);
}

#ifdef CONFIG_SH_FPU_EMU
static int emulate_branch(unsigned short inst, struct pt_regs *regs)
{
	/*
	 * bfs: 8fxx: PC+=d*2+4;
	 * bts: 8dxx: PC+=d*2+4;
	 * bra: axxx: PC+=D*2+4;
	 * bsr: bxxx: PC+=D*2+4  after PR=PC+4;
	 * braf:0x23: PC+=Rn*2+4;
	 * bsrf:0x03: PC+=Rn*2+4 after PR=PC+4;
	 * jmp: 4x2b: PC=Rn;
	 * jsr: 4x0b: PC=Rn      after PR=PC+4;
	 * rts: 000b: PC=PR;
	 */
	if (((inst & 0xf000) == 0xb000)  ||	/* bsr */
	    ((inst & 0xf0ff) == 0x0003)  ||	/* bsrf */
	    ((inst & 0xf0ff) == 0x400b))	/* jsr */
		regs->pr = regs->pc + 4;

	if ((inst & 0xfd00) == 0x8d00) {	/* bfs, bts */
		regs->pc += SH_PC_8BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xe000) == 0xa000) {	/* bra, bsr */
		regs->pc += SH_PC_12BIT_OFFSET(inst);
		return 0;
	}

	if ((inst & 0xf0df) == 0x0003) {	/* braf, bsrf */
		regs->pc += regs->regs[(inst & 0x0f00) >> 8] + 4;
		return 0;
	}

	if ((inst & 0xf0df) == 0x400b) {	/* jmp, jsr */
		regs->pc = regs->regs[(inst & 0x0f00) >> 8];
		return 0;
	}

	if ((inst & 0xffff) == 0x000b) {	/* rts */
		regs->pc = regs->pr;
		return 0;
	}

	return 1;
}
#endif

asmlinkage void do_illegal_slot_inst(void)
{
	struct pt_regs *regs = current_pt_regs();
	unsigned long inst;

	if (kprobe_handle_illslot(regs->pc) == 0)
		return;

#ifdef CONFIG_SH_FPU_EMU
	get_user(inst, (unsigned short *)regs->pc + 1);
	if (!do_fpu_inst(inst, regs)) {
		get_user(inst, (unsigned short *)regs->pc);
		if (!emulate_branch(inst, regs))
			return;
		/* fault in branch.*/
	}
	/* not a FPU inst. */
#endif

	inst = lookup_exception_vector();

	local_irq_enable();
	force_sig(SIGILL);
	die_if_no_fixup("illegal slot instruction", regs, inst);
}

asmlinkage void do_exception_error(void)
{
	long ex;

	ex = lookup_exception_vector();
	die_if_kernel("exception", current_pt_regs(), ex);
}

void per_cpu_trap_init(void)
{
	extern void *vbr_base;

	/* NOTE: The VBR value should be at P1
	   (or P2, the virtual "fixed" address space).
	   It definitely should not be a physical address.  */

	asm volatile("ldc	%0, vbr"
		     : /* no output */
		     : "r" (&vbr_base)
		     : "memory");

	/* disable exception blocking now when the vbr has been setup */
	clear_bl_bit();
}

void *set_exception_table_vec(unsigned int vec, void *handler)
{
	extern void *exception_handling_table[];
	void *old_handler;

	old_handler = exception_handling_table[vec];
	exception_handling_table[vec] = handler;
	return old_handler;
}

void __init trap_init(void)
{
	set_exception_table_vec(TRAP_RESERVED_INST, do_reserved_inst);
	set_exception_table_vec(TRAP_ILLEGAL_SLOT_INST, do_illegal_slot_inst);

#if defined(CONFIG_CPU_SH4) && !defined(CONFIG_SH_FPU) || \
    defined(CONFIG_SH_FPU_EMU)
	/*
	 * For SH-4 lacking an FPU, treat floating point instructions as
	 * reserved. They'll be handled in the math-emu case, or faulted on
	 * otherwise.
	 */
	set_exception_table_evt(0x800, do_reserved_inst);
	set_exception_table_evt(0x820, do_illegal_slot_inst);
#elif defined(CONFIG_SH_FPU)
	set_exception_table_evt(0x800, fpu_state_restore_trap_handler);
	set_exception_table_evt(0x820, fpu_state_restore_trap_handler);
#endif

#ifdef CONFIG_CPU_SH2
	set_exception_table_vec(TRAP_ADDRESS_ERROR, address_error_trap_handler);
#endif
#ifdef CONFIG_CPU_SH2A
	set_exception_table_vec(TRAP_DIVZERO_ERROR, do_divide_error);
	set_exception_table_vec(TRAP_DIVOVF_ERROR, do_divide_error);
#ifdef CONFIG_SH_FPU
	set_exception_table_vec(TRAP_FPU_ERROR, fpu_error_trap_handler);
#endif
#endif

#ifdef TRAP_UBC
	set_exception_table_vec(TRAP_UBC, breakpoint_trap_handler);
#endif
}