Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/alpha/kernel/traps.c (all lines from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300):

// SPDX-License-Identifier: GPL-2.0
/*
 * arch/alpha/kernel/traps.c
 *
 * (C) Copyright 1994 Linus Torvalds
 */

/*
 * This file initializes the trap entry points
 */

#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/tty.h>
#include <linux/delay.h>
#include <linux/extable.h>
#include <linux/kallsyms.h>
#include <linux/ratelimit.h>

#include <asm/gentrap.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>
#include <asm/sysinfo.h>
#include <asm/hwrpb.h>
#include <asm/mmu_context.h>
#include <asm/special_insns.h>

#include "proto.h"

/* Work-around for some SRMs which mishandle opDEC faults.  */

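/*
 * opDEC_fix holds the PC adjustment (0 or 4) that do_entIF() applies on
 * EV4 to compensate for SRM/PALcode versions that report the PC of the
 * faulting opDEC instruction rather than the instruction after it.
 * opDEC_check() below probes for this during boot.
 */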
static int opDEC_fix;

static void
opDEC_check(void)
{
	__asm__ __volatile__ (
	/* Load the address of... */
	"	br	$16, 1f\n"
	/* A stub instruction fault handler.  Just add 4 to the
	   pc and continue.  */
	"	ldq	$16, 8($sp)\n"
	"	addq	$16, 4, $16\n"
	"	stq	$16, 8($sp)\n"
	"	call_pal %[rti]\n"
	/* Install the instruction fault handler.  */
	"1:	lda	$17, 3\n"
	"	call_pal %[wrent]\n"
	/* With that in place, the fault from the round-to-minf fp
	   insn will arrive either at the "lda 4" insn (bad) or one
	   past that (good).  This places the correct fixup in %0.  */
	"	lda %[fix], 0\n"
	"	cvttq/svm $f31,$f31\n"
	"	lda %[fix], 4"
	: [fix] "=r" (opDEC_fix)
	: [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent)
	: "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25");

	if (opDEC_fix)
		printk("opDEC fixup enabled.\n");
}

void
dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
{
	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx    %s\n",
	       regs->pc, regs->r26, regs->ps, print_tainted());
	printk("pc is at %pSR\n", (void *)regs->pc);
	printk("ra is at %pSR\n", (void *)regs->r26);
	printk("v0 = %016lx  t0 = %016lx  t1 = %016lx\n",
	       regs->r0, regs->r1, regs->r2);
	printk("t2 = %016lx  t3 = %016lx  t4 = %016lx\n",
	       regs->r3, regs->r4, regs->r5);
	printk("t5 = %016lx  t6 = %016lx  t7 = %016lx\n",
	       regs->r6, regs->r7, regs->r8);

	if (r9_15) {
		printk("s0 = %016lx  s1 = %016lx  s2 = %016lx\n",
		       r9_15[9], r9_15[10], r9_15[11]);
		printk("s3 = %016lx  s4 = %016lx  s5 = %016lx\n",
		       r9_15[12], r9_15[13], r9_15[14]);
		printk("s6 = %016lx\n", r9_15[15]);
	}

	printk("a0 = %016lx  a1 = %016lx  a2 = %016lx\n",
	       regs->r16, regs->r17, regs->r18);
	printk("a3 = %016lx  a4 = %016lx  a5 = %016lx\n",
	       regs->r19, regs->r20, regs->r21);
	printk("t8 = %016lx  t9 = %016lx  t10= %016lx\n",
	       regs->r22, regs->r23, regs->r24);
	printk("t11= %016lx  pv = %016lx  at = %016lx\n",
	       regs->r25, regs->r27, regs->r28);
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);
#if 0
__halt();
#endif
}

#if 0
static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
			   "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6",
			   "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
			   "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"};
#endif

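/*
 * Dump the instruction stream around a faulting PC: six words before it,
 * the faulting word itself (bracketed in <>), and one word after.
 */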
static void
dik_show_code(unsigned int *pc)
{
	long i;

	printk("Code:");
	for (i = -6; i < 2; i++) {
		unsigned int insn;
		if (__get_user(insn, (unsigned int __user *)pc + i))
			break;
		printk("%c%08x%c", i ? ' ' : '<', insn, i ? ' ' : '>');
	}
	printk("\n");
}

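/*
 * Walk raw stack words up to the next 8 KB-aligned boundary (the 0x1ff8
 * mask below) and print every value that falls inside the kernel text
 * segment, on the assumption that it is a return address.
 */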
static void
dik_show_trace(unsigned long *sp, const char *loglvl)
{
	long i = 0;
	printk("%sTrace:\n", loglvl);
	while (0x1ff8 & (unsigned long) sp) {
		extern char _stext[], _etext[];
		unsigned long tmp = *sp;
		sp++;
		if (tmp < (unsigned long) &_stext)
			continue;
		if (tmp >= (unsigned long) &_etext)
			continue;
		printk("%s[<%lx>] %pSR\n", loglvl, tmp, (void *)tmp);
		if (i > 40) {
			printk("%s ...", loglvl);
			break;
		}
	}
	printk("%s\n", loglvl);
}

static int kstack_depth_to_print = 24;

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	unsigned long *stack;
	int i;

	/*
	 * debugging aid: "show_stack(NULL, NULL, KERN_EMERG);" prints the
	 * back trace for this cpu.
	 */
	if(sp==NULL)
		sp=(unsigned long*)&sp;

	stack = sp;
	for(i=0; i < kstack_depth_to_print; i++) {
		if (((long) stack & (THREAD_SIZE-1)) == 0)
			break;
		if ((i % 4) == 0) {
			if (i)
				pr_cont("\n");
			printk("%s       ", loglvl);
		} else {
			pr_cont(" ");
		}
		pr_cont("%016lx", *stack++);
	}
	pr_cont("\n");
	dik_show_trace(sp, loglvl);
}

void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
	dik_show_regs(regs, r9_15);
	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
	dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);
	dik_show_code((unsigned int *)regs->pc);

	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}

#ifndef CONFIG_MATHEMU
static long dummy_emul(void) { return 0; }
long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul_imprecise);
long (*alpha_fp_emul) (unsigned long pc)
  = (void *)dummy_emul;
EXPORT_SYMBOL_GPL(alpha_fp_emul);
#else
long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask);
long alpha_fp_emul (unsigned long pc);
#endif

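/*
 * Arithmetic trap entry point.  The PALcode supplies the exception
 * summary register and the register write mask for the trapping
 * operation(s); bit 0 of the summary is the software-completion bit.
 */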
asmlinkage void
do_entArith(unsigned long summary, unsigned long write_mask,
	    struct pt_regs *regs)
{
	long si_code = FPE_FLTINV;

	if (summary & 1) {
		/* Software-completion summary bit is set, so try to
		   emulate the instruction.  If the processor supports
		   precise exceptions, we don't have to search.  */
		if (!amask(AMASK_PRECISE_TRAP))
			si_code = alpha_fp_emul(regs->pc - 4);
		else
			si_code = alpha_fp_emul_imprecise(regs, write_mask);
		if (si_code == 0)
			return;
	}
	die_if_kernel("Arithmetic fault", regs, 0, NULL);

	send_sig_fault(SIGFPE, si_code, (void __user *) regs->pc, 0, current);
}

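/*
 * Instruction fault entry point.  "type" distinguishes the cause:
 * 0 = breakpoint (bpt), 1 = bugcheck, 2 = gentrap, 3 = FEN fault
 * (FP use while disabled), 4 = opDEC (reserved/illegal instruction),
 * 5 = illoc.
 */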
asmlinkage void
do_entIF(unsigned long type, struct pt_regs *regs)
{
	int signo, code;

	if ((regs->ps & ~IPL_MAX) == 0) {
		if (type == 1) {
			const unsigned int *data
			  = (const unsigned int *) regs->pc;
			printk("Kernel bug at %s:%d\n",
			       (const char *)(data[1] | (long)data[2] << 32),
			       data[0]);
		}
#ifdef CONFIG_ALPHA_WTINT
		if (type == 4) {
			/* If CALL_PAL WTINT is totally unsupported by the
			   PALcode, e.g. MILO, "emulate" it by overwriting
			   the insn.  */
			unsigned int *pinsn
			  = (unsigned int *) regs->pc - 1;
			if (*pinsn == PAL_wtint) {
				*pinsn = 0x47e01400; /* mov 0,$0 */
				imb();
				regs->r0 = 0;
				return;
			}
		}
#endif /* ALPHA_WTINT */
		die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"),
			      regs, type, NULL);
	}

	switch (type) {
	      case 0: /* breakpoint */
		if (ptrace_cancel_bpt(current)) {
			regs->pc -= 4;	/* make pc point to former bpt */
		}

		send_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->pc, 0,
			       current);
		return;

	      case 1: /* bugcheck */
		send_sig_fault(SIGTRAP, TRAP_UNK, (void __user *) regs->pc, 0,
			       current);
		return;

	      case 2: /* gentrap */
		switch ((long) regs->r16) {
		case GEN_INTOVF:
			signo = SIGFPE;
			code = FPE_INTOVF;
			break;
		case GEN_INTDIV:
			signo = SIGFPE;
			code = FPE_INTDIV;
			break;
		case GEN_FLTOVF:
			signo = SIGFPE;
			code = FPE_FLTOVF;
			break;
		case GEN_FLTDIV:
			signo = SIGFPE;
			code = FPE_FLTDIV;
			break;
		case GEN_FLTUND:
			signo = SIGFPE;
			code = FPE_FLTUND;
			break;
		case GEN_FLTINV:
			signo = SIGFPE;
			code = FPE_FLTINV;
			break;
		case GEN_FLTINE:
			signo = SIGFPE;
			code = FPE_FLTRES;
			break;
		case GEN_ROPRAND:
			signo = SIGFPE;
			code = FPE_FLTUNK;
			break;

		case GEN_DECOVF:
		case GEN_DECDIV:
		case GEN_DECINV:
		case GEN_ASSERTERR:
		case GEN_NULPTRERR:
		case GEN_STKOVF:
		case GEN_STRLENERR:
		case GEN_SUBSTRERR:
		case GEN_RANGERR:
		case GEN_SUBRNG:
		case GEN_SUBRNG1:
		case GEN_SUBRNG2:
		case GEN_SUBRNG3:
		case GEN_SUBRNG4:
		case GEN_SUBRNG5:
		case GEN_SUBRNG6:
		case GEN_SUBRNG7:
		default:
			signo = SIGTRAP;
			code = TRAP_UNK;
			break;
		}

		send_sig_fault(signo, code, (void __user *) regs->pc, regs->r16,
			       current);
		return;

	      case 4: /* opDEC */
		if (implver() == IMPLVER_EV4) {
			long si_code;

			/* Some versions of SRM do not handle the opDEC
			   fault properly - they return the PC of the
			   opDEC fault, not the instruction after it, as
			   the Alpha architecture requires.  Here we fix
			   it up by intentionally causing an opDEC fault
			   during the boot sequence and testing whether
			   we get the correct PC.  If not, we set a flag
			   to correct the PC every time through.  */
			regs->pc += opDEC_fix;

			/* EV4 does not implement anything except normal
			   rounding.  Everything else will come here as
			   an illegal instruction.  Emulate them.  */
			si_code = alpha_fp_emul(regs->pc - 4);
			if (si_code == 0)
				return;
			if (si_code > 0) {
				send_sig_fault(SIGFPE, si_code,
					       (void __user *) regs->pc, 0,
					       current);
				return;
			}
		}
		break;

	      case 3: /* FEN fault */
		/* Irritating users can call PAL_clrfen to disable the
		   FPU for the process.  The kernel will then trap in
		   do_switch_stack and undo_switch_stack when we try
		   to save and restore the FP registers.

		   Given that GCC by default generates code that uses the
		   FP registers, PAL_clrfen is not useful except for DoS
		   attacks.  So turn the bleeding FPU back on and be done
		   with it.  */
		current_thread_info()->pcb.flags |= 1;
		__reload_thread(&current_thread_info()->pcb);
		return;

	      case 5: /* illoc */
	      default: /* unexpected instruction-fault type */
		      ;
	}

	send_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0, current);
}

/* There is an ifdef in the PALcode in MILO that enables a
   "kernel debugging entry point" as an unprivileged call_pal.

   We don't want to have anything to do with it, but unfortunately
   several versions of MILO included in distributions have it enabled,
   and if we don't put something on the entry point we'll oops.  */

asmlinkage void
do_entDbg(struct pt_regs *regs)
{
	die_if_kernel("Instruction fault", regs, 0, NULL);

	force_sig_fault(SIGILL, ILL_ILLOPC, (void __user *)regs->pc, 0);
}

/*
 * entUna uses a different register layout in order to stay reasonably
 * simple.  It needs access to all the integer registers (the kernel
 * doesn't use fp-regs), and it needs to have them in order for simpler
 * access.
 *
 * Due to the non-standard register layout (and because we don't want
 * to handle floating-point regs), user-mode unaligned accesses are
 * handled separately by do_entUnaUser below.
 *
 * Oh, btw, we don't handle the "gp" register correctly, but if we fault
 * on a gp-register unaligned load/store, something is _very_ wrong
 * in the kernel anyway..
 */
struct allregs {
	unsigned long regs[32];
	unsigned long ps, pc, gp, a0, a1, a2;
};

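/*
 * Unaligned-access statistics: unaligned[0] is updated for kernel-mode
 * fixups in do_entUna(), unaligned[1] for user-mode fixups in
 * do_entUnaUser().  Each entry records the count plus the most recent
 * faulting virtual address and PC.
 */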
struct unaligned_stat {
	unsigned long count, va, pc;
} unaligned[2];


/* Macro for exception fixup code to access integer registers.  */
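/*
 * In struct allregs, a0-a2 (r16-r18) are not stored in regs[16..18] but
 * in the a0/a1/a2 slots that follow the array, i.e. at indices 35-37
 * when viewed through _regs.  The macro remaps r16-r18 accordingly and
 * indexes regs[] directly for everything else.
 */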
#define una_reg(r)  (_regs[(r) >= 16 && (r) <= 18 ? (r)+19 : (r)])


asmlinkage void
do_entUna(void * va, unsigned long opcode, unsigned long reg,
	  struct allregs *regs)
{
	long error, tmp1, tmp2, tmp3, tmp4;
	unsigned long pc = regs->pc - 4;
	unsigned long *_regs = regs->regs;
	const struct exception_table_entry *fixup;

	unaligned[0].count++;
	unaligned[0].va = (unsigned long) va;
	unaligned[0].pc = pc;

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = (int)(tmp1|tmp2);
		return;

	case 0x29: /* ldq */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto got_exception;
		una_reg(reg) = tmp1|tmp2;
		return;

	/* Note that the store sequences do not indicate that they change
	   memory because it _should_ be affecting nothing in this context.
	   (Otherwise we have other, much larger, problems.)  */
	case 0x0d: /* stw */
		__asm__ __volatile__(
		"1:	ldq_u %2,1(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inswh %6,%5,%4\n"
		"	inswl %6,%5,%3\n"
		"	mskwh %2,%5,%2\n"
		"	mskwl %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,1(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2c: /* stl */
		__asm__ __volatile__(
		"1:	ldq_u %2,3(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	inslh %6,%5,%4\n"
		"	insll %6,%5,%3\n"
		"	msklh %2,%5,%2\n"
		"	mskll %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,3(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;

	case 0x2d: /* stq */
		__asm__ __volatile__(
		"1:	ldq_u %2,7(%5)\n"
		"2:	ldq_u %1,0(%5)\n"
		"	insqh %6,%5,%4\n"
		"	insql %6,%5,%3\n"
		"	mskqh %2,%5,%2\n"
		"	mskql %1,%5,%1\n"
		"	or %2,%4,%2\n"
		"	or %1,%3,%1\n"
		"3:	stq_u %2,7(%5)\n"
		"4:	stq_u %1,0(%5)\n"
		"5:\n"
		EXC(1b,5b,%2,%0)
		EXC(2b,5b,%1,%0)
		EXC(3b,5b,$31,%0)
		EXC(4b,5b,$31,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
			  "=&r"(tmp3), "=&r"(tmp4)
			: "r"(va), "r"(una_reg(reg)), "0"(0));
		if (error)
			goto got_exception;
		return;
	}

	printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n",
		pc, va, opcode, reg);
	do_exit(SIGSEGV);

got_exception:
	/* Ok, we caught the exception, but we don't want it.  Is there
	   someone to pass it along to?  */
	if ((fixup = search_exception_tables(pc)) != 0) {
		unsigned long newpc;
		newpc = fixup_exception(una_reg, fixup, pc);

		printk("Forwarding unaligned exception at %lx (%lx)\n",
		       pc, newpc);

		regs->pc = newpc;
		return;
	}

	/*
	 * Yikes!  No one to forward the exception to.
	 * Since the registers are in a weird format, dump them ourselves.
	 */

	printk("%s(%d): unhandled unaligned exception\n",
	       current->comm, task_pid_nr(current));

	printk("pc = [<%016lx>]  ra = [<%016lx>]  ps = %04lx\n",
	       pc, una_reg(26), regs->ps);
	printk("r0 = %016lx  r1 = %016lx  r2 = %016lx\n",
	       una_reg(0), una_reg(1), una_reg(2));
	printk("r3 = %016lx  r4 = %016lx  r5 = %016lx\n",
	       una_reg(3), una_reg(4), una_reg(5));
	printk("r6 = %016lx  r7 = %016lx  r8 = %016lx\n",
	       una_reg(6), una_reg(7), una_reg(8));
	printk("r9 = %016lx  r10= %016lx  r11= %016lx\n",
	       una_reg(9), una_reg(10), una_reg(11));
	printk("r12= %016lx  r13= %016lx  r14= %016lx\n",
	       una_reg(12), una_reg(13), una_reg(14));
	printk("r15= %016lx\n", una_reg(15));
	printk("r16= %016lx  r17= %016lx  r18= %016lx\n",
	       una_reg(16), una_reg(17), una_reg(18));
	printk("r19= %016lx  r20= %016lx  r21= %016lx\n",
	       una_reg(19), una_reg(20), una_reg(21));
	printk("r22= %016lx  r23= %016lx  r24= %016lx\n",
	       una_reg(22), una_reg(23), una_reg(24));
	printk("r25= %016lx  r27= %016lx  r28= %016lx\n",
	       una_reg(25), una_reg(27), una_reg(28));
	printk("gp = %016lx  sp = %p\n", regs->gp, regs+1);

	dik_show_code((unsigned int *)pc);
	dik_show_trace((unsigned long *)(regs+1), KERN_DEFAULT);

	if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}

/*
 * Convert an s-floating point value in memory format to the
 * corresponding value in register format.  The exponent
 * needs to be remapped to preserve non-finite values
 * (infinities, not-a-numbers, denormals).
 */
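/*
 * For example (assuming the usual IEEE encodings): 1.0f is 0x3f800000 in
 * S-float memory format; the remapping below turns that into the T-float
 * register image 0x3ff0000000000000 by widening the 8-bit, bias-127
 * exponent to the 11-bit, bias-1023 form.
 */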
static inline unsigned long
s_mem_to_reg (unsigned long s_mem)
{
	unsigned long frac    = (s_mem >>  0) & 0x7fffff;
	unsigned long sign    = (s_mem >> 31) & 0x1;
	unsigned long exp_msb = (s_mem >> 30) & 0x1;
	unsigned long exp_low = (s_mem >> 23) & 0x7f;
	unsigned long exp;

	exp = (exp_msb << 10) | exp_low;	/* common case */
	if (exp_msb) {
		if (exp_low == 0x7f) {
			exp = 0x7ff;
		}
	} else {
		if (exp_low == 0x00) {
			exp = 0x000;
		} else {
			exp |= (0x7 << 7);
		}
	}
	return (sign << 63) | (exp << 52) | (frac << 29);
}

/*
 * Convert an s-floating point value in register format to the
 * corresponding value in memory format.
 */
static inline unsigned long
s_reg_to_mem (unsigned long s_reg)
{
	return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34);
}

/*
 * Handle user-level unaligned fault.  Handling user-level unaligned
 * faults is *extremely* slow and produces nasty messages.  A user
 * program *should* fix unaligned faults ASAP.
 *
 * Notice that we have (almost) the regular kernel stack layout here,
 * so finding the appropriate registers is a little more difficult
 * than in the kernel case.
 *
 * Finally, we handle regular integer load/stores only.  In
 * particular, load-linked/store-conditionally and floating point
 * load/stores are not supported.  The former make no sense with
 * unaligned faults (they are guaranteed to fail) and I don't think
 * the latter will occur in any decent program.
 *
 * Sigh.  We *do* have to handle some FP operations, because GCC uses
 * them as temporary storage for integer memory-to-memory copies.
 * However, we need to deal with stt/ldt and sts/lds only.
 */

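/*
 * Bitmasks indexed by the 6-bit Alpha opcode: OP_INT_MASK marks the
 * integer load/store opcodes, OP_WRITE_MASK the store opcodes.  They
 * are tested with expressions of the form (1L << opcode) & MASK.
 */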
#define OP_INT_MASK	( 1L << 0x28 | 1L << 0x2c   /* ldl stl */	\
			| 1L << 0x29 | 1L << 0x2d   /* ldq stq */	\
			| 1L << 0x0c | 1L << 0x0d   /* ldwu stw */	\
			| 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */

#define OP_WRITE_MASK	( 1L << 0x26 | 1L << 0x27   /* sts stt */	\
			| 1L << 0x2c | 1L << 0x2d   /* stl stq */	\
			| 1L << 0x0d | 1L << 0x0e ) /* stw stb */

#define R(x)	((size_t) &((struct pt_regs *)0)->x)

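/*
 * Byte offset, relative to the struct pt_regs pointer, of the save slot
 * for each integer register.  r9-r15 live just in front of the pt_regs
 * area (hence the negative offsets), and the last two entries (r30/usp
 * and r31/zero) are placeholders: do_entUnaUser() handles those
 * registers specially through fake_reg.
 */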
static int unauser_reg_offsets[32] = {
	R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8),
	/* r9 ... r15 are stored in front of regs.  */
	-56, -48, -40, -32, -24, -16, -8,
	R(r16), R(r17), R(r18),
	R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26),
	R(r27), R(r28), R(gp),
	0, 0
};

#undef R

asmlinkage void
do_entUnaUser(void __user * va, unsigned long opcode,
	      unsigned long reg, struct pt_regs *regs)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);

	unsigned long tmp1, tmp2, tmp3, tmp4;
	unsigned long fake_reg, *reg_addr = &fake_reg;
	int si_code;
	long error;

	/* Check the UAC bits to decide what the user wants us to do
	   with the unaligned access.  */

	if (!(current_thread_info()->status & TS_UAC_NOPRINT)) {
		if (__ratelimit(&ratelimit)) {
			printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
			       current->comm, task_pid_nr(current),
			       regs->pc - 4, va, opcode, reg);
		}
	}
	if ((current_thread_info()->status & TS_UAC_SIGBUS))
		goto give_sigbus;
	/* Not sure why you'd want to use this, but... */
	if ((current_thread_info()->status & TS_UAC_NOFIX))
		return;

	/* Don't bother reading ds in the access check since we already
	   know that this came from the user.  Also rely on the fact that
	   the page at TASK_SIZE is unmapped and so can't be touched anyway. */
	if ((unsigned long)va >= TASK_SIZE)
		goto give_sigsegv;

	++unaligned[1].count;
	unaligned[1].va = (unsigned long)va;
	unaligned[1].pc = regs->pc - 4;

	if ((1L << opcode) & OP_INT_MASK) {
		/* it's an integer load/store */
		if (reg < 30) {
			reg_addr = (unsigned long *)
			  ((char *)regs + unauser_reg_offsets[reg]);
		} else if (reg == 30) {
			/* usp in PAL regs */
			fake_reg = rdusp();
		} else {
			/* zero "register" */
			fake_reg = 0;
		}
	}

	/* We don't want to use the generic get/put unaligned macros as
	   we want to trap exceptions.  Only if we actually get an
	   exception will we decide whether we should have caught it.  */

	switch (opcode) {
	case 0x0c: /* ldwu */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,1(%3)\n"
		"	extwl %1,%3,%1\n"
		"	extwh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		*reg_addr = tmp1|tmp2;
		break;

	case 0x22: /* lds */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2)));
		return;

	case 0x23: /* ldt */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,7(%3)\n"
		"	extql %1,%3,%1\n"
		"	extqh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
			: "r"(va), "0"(0));
		if (error)
			goto give_sigsegv;
		alpha_write_fp_reg(reg, tmp1|tmp2);
		return;

	case 0x28: /* ldl */
		__asm__ __volatile__(
		"1:	ldq_u %1,0(%3)\n"
		"2:	ldq_u %2,3(%3)\n"
		"	extll %1,%3,%1\n"
		"	extlh %2,%3,%2\n"
		"3:\n"
		EXC(1b,3b,%1,%0)
		EXC(2b,3b,%2,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 			: "r"(va), "0"(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 			goto give_sigsegv;
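		/* ldl sign-extends its 32-bit result; the cast through
		   (int) reproduces that when widening back to 64 bits. */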
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) 		*reg_addr = (int)(tmp1|tmp2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) 	case 0x29: /* ldq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) 		__asm__ __volatile__(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) 		"1:	ldq_u %1,0(%3)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) 		"2:	ldq_u %2,7(%3)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 		"	extql %1,%3,%1\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) 		"	extqh %2,%3,%2\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 		"3:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 		EXC(1b,3b,%1,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 		EXC(2b,3b,%2,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) 			: "r"(va), "0"(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) 			goto give_sigsegv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 		*reg_addr = tmp1|tmp2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	/* Note that the store sequences do not indicate that they change
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	   memory because it _should_ be affecting nothing in this context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	   (Otherwise we have other, much larger, problems.)  */
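	/* Stores are a read-modify-write of the one or two aligned
	   quadwords overlapping the destination: ldq_u reads the old
	   contents, ins*l/ins*h position the new bytes, msk*l/msk*h clear
	   the bytes being replaced, and the merged quadwords are written
	   back with stq_u. */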
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	case 0x0d: /* stw */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 		__asm__ __volatile__(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 		"1:	ldq_u %2,1(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 		"2:	ldq_u %1,0(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 		"	inswh %6,%5,%4\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) 		"	inswl %6,%5,%3\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 		"	mskwh %2,%5,%2\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) 		"	mskwl %1,%5,%1\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) 		"	or %2,%4,%2\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 		"	or %1,%3,%1\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) 		"3:	stq_u %2,1(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 		"4:	stq_u %1,0(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) 		"5:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) 		EXC(1b,5b,%2,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 		EXC(2b,5b,%1,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) 		EXC(3b,5b,$31,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 		EXC(4b,5b,$31,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) 			  "=&r"(tmp3), "=&r"(tmp4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 			: "r"(va), "r"(*reg_addr), "0"(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) 			goto give_sigsegv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 	case 0x26: /* sts */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) 		fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 	case 0x2c: /* stl */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 		__asm__ __volatile__(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 		"1:	ldq_u %2,3(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 		"2:	ldq_u %1,0(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 		"	inslh %6,%5,%4\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 		"	insll %6,%5,%3\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 		"	msklh %2,%5,%2\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 		"	mskll %1,%5,%1\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 		"	or %2,%4,%2\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) 		"	or %1,%3,%1\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 		"3:	stq_u %2,3(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 		"4:	stq_u %1,0(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) 		"5:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 		EXC(1b,5b,%2,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 		EXC(2b,5b,%1,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 		EXC(3b,5b,$31,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 		EXC(4b,5b,$31,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 			  "=&r"(tmp3), "=&r"(tmp4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 			: "r"(va), "r"(*reg_addr), "0"(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 			goto give_sigsegv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 	case 0x27: /* stt */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 		fake_reg = alpha_read_fp_reg(reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) 	case 0x2d: /* stq */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 		__asm__ __volatile__(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) 		"1:	ldq_u %2,7(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 		"2:	ldq_u %1,0(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 		"	insqh %6,%5,%4\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 		"	insql %6,%5,%3\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 		"	mskqh %2,%5,%2\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 		"	mskql %1,%5,%1\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 		"	or %2,%4,%2\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 		"	or %1,%3,%1\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 		"3:	stq_u %2,7(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 		"4:	stq_u %1,0(%5)\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 		"5:\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 		EXC(1b,5b,%2,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 		EXC(2b,5b,%1,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 		EXC(3b,5b,$31,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 		EXC(4b,5b,$31,%0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 			: "=r"(error), "=&r"(tmp1), "=&r"(tmp2),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 			  "=&r"(tmp3), "=&r"(tmp4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 			: "r"(va), "r"(*reg_addr), "0"(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) 		if (error)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) 			goto give_sigsegv;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) 		/* What instruction were you trying to use, exactly?  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) 		goto give_sigbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) 	/* Only integer loads should get here; everyone else returns early. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) 	if (reg == 30)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) 		wrusp(fake_reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) give_sigsegv:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) 	regs->pc -= 4;  /* make pc point to faulting insn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) 	/* We need to replicate some of the logic in mm/fault.c,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 	   since we don't have access to the fault code in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) 	   exception handling return path.  */
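	/* SEGV_ACCERR: a mapping exists but the access was not permitted;
	   SEGV_MAPERR: nothing is mapped at the faulting address.  A
	   find_vma() lookup under mmap_read_lock() is enough to pick the
	   si_code for the signal we are about to send. */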
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) 	if ((unsigned long)va >= TASK_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) 		si_code = SEGV_ACCERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) 		struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) 		mmap_read_lock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) 		if (find_vma(mm, (unsigned long)va))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) 			si_code = SEGV_ACCERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) 			si_code = SEGV_MAPERR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) 		mmap_read_unlock(mm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) 	send_sig_fault(SIGSEGV, si_code, va, 0, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) give_sigbus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) 	regs->pc -= 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) 	send_sig_fault(SIGBUS, BUS_ADRALN, va, 0, current);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) 	return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) trap_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) 	/* Tell PAL-code what global pointer we want in the kernel.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) 	register unsigned long gptr __asm__("$29");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) 	wrkgp(gptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) 	/* Hack for Multia (UDB) and JENSEN: some of their SRMs have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) 	   a bug in the handling of the opDEC fault.  Fix it up if so.  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) 	if (implver() == IMPLVER_EV4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) 		opDEC_check();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) 
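	/* Install the kernel entry points with PALcode: 1 = arithmetic
	   traps, 2 = memory-management faults, 3 = instruction faults
	   (bpt/bugchk/gentrap/FEN/opDEC), 4 = unaligned access,
	   5 = system calls, 6 = debug. */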
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) 	wrent(entArith, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) 	wrent(entMM, 2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) 	wrent(entIF, 3);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) 	wrent(entUna, 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) 	wrent(entSys, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) 	wrent(entDbg, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) }