Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * unaligned.c: Unaligned load/store trap handling with special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *              cases for the kernel to do them more quickly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/jiffies.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) #include <linux/extable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) #include <asm/asi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) #include <asm/ptrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) #include <asm/pstate.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) #include <asm/processor.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) #include <linux/uaccess.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) #include <linux/smp.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) #include <linux/bitops.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) #include <linux/perf_event.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) #include <linux/ratelimit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) #include <linux/context_tracking.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) #include <asm/fpumacro.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) #include <asm/cacheflush.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) #include <asm/setup.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) #include "entry.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) #include "kernel.h"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) 
/* Memory-access direction decoded from the trapping instruction. */
enum direction {
	load,    /* ld, ldd, ldh, ldsh */
	store,   /* st, std, sth, stsh */
	both,    /* Swap, ldstub, cas, ... */
	fpld,    /* floating-point load */
	fpst,    /* floating-point store */
	invalid,
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) static inline enum direction decode_direction(unsigned int insn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	unsigned long tmp = (insn >> 21) & 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 	if (!tmp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 		return load;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) 		switch ((insn>>19)&0xf) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 		case 15: /* swap* */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 			return both;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  52) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  53) 			return store;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  54) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  55) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  57) 
/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(struct pt_regs *regs, unsigned int insn)
{
	unsigned int op3 = (insn >> 19) & 0xf;

	/* ldx/stx: full 64-bit access. */
	if (op3 == 11 || op3 == 14)
		return 8;

	switch (op3 & 3) {
	case 0:
		return 4;
	case 3:
		return 16;	/* ldd/std - Although it is actually 8 */
	case 2:
		return 2;
	default:
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!", regs);

		/* GCC should never warn that control reaches the end
		 * of this function without returning a value because
		 * die_if_kernel() is marked with attribute 'noreturn'.
		 * Alas, some versions do...
		 */
		return 0;
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  86) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  87) static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  88) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  89) 	if (insn & 0x800000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  90) 		if (insn & 0x2000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  91) 			return (unsigned char)(regs->tstate >> 24);	/* %asi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) 			return (unsigned char)(insn >> 5);		/* imm_asi */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 	} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  95) 		return ASI_P;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  96) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  97) 
/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	/* Non-zero iff the sign-extending-load bit is set. */
	return insn & 0x400000;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) 
/* Spill saved register windows to the stack if any of the referenced
 * registers (reg >= 16, i.e. %l0-%i7) lives in a window frame.
 */
static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs1 < 16 && rs2 < 16 && rd < 16)
		return;

	if (from_kernel)
		__asm__ __volatile__("flushw");
	else
		flushw_user();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) 
/* Sign-extend the low 13 bits of @imm (the simm13 instruction field).
 *
 * Implemented with masking arithmetic rather than "imm << 51 >> 51":
 * left-shifting a signed value whose bits reach the sign position is
 * undefined behaviour in C, while this form is fully defined and
 * produces the same result for every input.
 */
static inline long sign_extend_imm13(long imm)
{
	return ((imm & 0x1fff) ^ 0x1000) - 0x1000;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) 
/* Read integer register @reg for the trapping context.
 *
 * Registers 0-15 (%g and %o) live in pt_regs; registers 16-31 (%l and
 * %i) live in the register window saved on the trapped context's stack,
 * which may be a biased 64-bit frame or a 32-bit compat frame.
 *
 * NOTE(review): the get_user() results are not checked here, so @value
 * is returned as-is if the user stack read faults — presumably callers
 * tolerate this; confirm before relying on it.
 */
static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value, fp;
	
	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);	/* %g0 always reads zero */

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel trap: the window is on the kernel stack, biased. */
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		/* 32-bit user stack: narrow frame, no bias, truncate fp. */
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		/* 64-bit user stack: full frame at the biased address. */
		struct reg_window __user *win;
		win = (struct reg_window __user *)(fp + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) 
/* Return the address where integer register @reg is stored for the
 * trapping context: inside pt_regs for reg < 16, otherwise inside the
 * register window saved on the context's (kernel or user) stack.
 *
 * NOTE(review): for a 32-bit user stack the returned pointer addresses
 * a 32-bit slot although the type says unsigned long * — callers must
 * account for the narrower storage.
 */
static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	unsigned long fp;

	if (reg < 16)
		return &regs->u_regs[reg];

	fp = regs->u_regs[UREG_FP];

	if (regs->tstate & TSTATE_PRIV) {
		/* Kernel trap: window is on the kernel stack, biased. */
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (!test_thread_64bit_stack(fp)) {
		/* 32-bit user stack: narrow frame, no bias, truncate fp. */
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)fp));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		/* 64-bit user stack: full frame at the biased address. */
		struct reg_window *win;
		win = (struct reg_window *)(fp + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 
/* Compute the effective memory address of the load/store @insn:
 * rs1 + simm13 (immediate form) or rs1 + rs2 (register form).
 * @rd is passed through so any referenced window registers get
 * flushed before they are read.
 */
unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	unsigned long addr;

	if (insn & 0x2000) {
		/* i bit set: rs1 + sign-extended 13-bit immediate. */
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		/* i bit clear: rs1 + rs2. */
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}

	/* 32-bit user tasks wrap addresses at 4GB. */
	if (!from_kernel && test_thread_flag(TIF_32BIT))
		addr &= 0xffffffff;

	return addr;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
/* This is just to make gcc think die_if_kernel does return...
 * (wrapping the noreturn call in a plain void function silences
 * "control reaches end of non-void function" style fallout at the
 * call sites).
 */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) extern int do_int_load(unsigned long *dest_reg, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 		       unsigned long *saddr, int is_signed, int asi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 	
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) extern int __do_int_store(unsigned long *dst_addr, int size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 			  unsigned long src_val, int asi);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 
/* Emulate an unaligned integer store of @size bytes to @dst_addr.
 *
 * A "size 16" request is the ldd/std pair form: the two 32-bit source
 * registers reg_num and reg_num+1 are packed into one 64-bit value and
 * stored as a single 8-byte access.  If @asi was downgraded from a
 * little-endian @orig_asi, the value is byte-swapped first so the
 * big-endian store produces the bytes the original ASI would have.
 */
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;	/* %g0 source reads as zero */
	unsigned long src_val;

	if (size == 16) {
		size = 8;
		/* High word from reg_num (zero for %g0), low word from
		 * reg_num + 1.
		 */
		zero = (((long)(reg_num ?
		        (unsigned int)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned int)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:
		default:
			/* size 16 was rewritten to 8 above, so only a
			 * corrupt size can land here.
			 */
			BUG();
			break;
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) static inline void advance(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) 	regs->tpc   = regs->tnpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 	regs->tnpc += 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) 	if (test_thread_flag(TIF_32BIT)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) 		regs->tpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) 		regs->tnpc &= 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) 
/* 1 if @insn is a floating-point load/store (opcode bit 24), else 0. */
static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn & 0x01000000U) ? 1 : 0;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) 
/* The kernel emulator below only handles integer accesses; FPU
 * loads/stores from kernel mode are rejected by the caller.
 */
static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
/* Handle a fault taken while emulating a kernel unaligned access.
 *
 * If the trapping PC has an exception-table fixup (i.e. it was a
 * {get,put}_user style access), redirect execution to the fixup
 * handler; otherwise this is a real kernel bug, so print the usual
 * paging-failure report and die.  @fixup_tstate_asi restores the
 * default user-secondary ASI in %tstate for the fixup path.
 */
static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
        	if (address < PAGE_SIZE) {
                	printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
        	} else
                	printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
	        printk(KERN_ALERT " at virtual address %016lx\n",address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
			(current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
			(current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
	        die_if_kernel("Oops", regs);
		/* Not reached */
	}
	/* Resume at the fixup stub; its delay-slot successor follows. */
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		/* Restore the default user-secondary ASI in %tstate. */
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) static void log_unaligned(struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) 	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) 	if (__ratelimit(&ratelimit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) 		printk("Kernel unaligned access at TPC[%lx] %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) 		       regs->tpc, (void *) regs->tpc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) 
/* Trap entry for a kernel-mode unaligned memory access: decode the
 * trapping instruction, emulate the load or store byte-wise, and step
 * past it.  FPU and atomic accesses are not emulated for the kernel.
 */
asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(regs, insn);
	int orig_asi, asi;

	/* Stash trap context for kernel_mna_trap_fault(). */
	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		/* rd is bits 29:25 of the instruction. */
		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
		perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
		/* Little-endian ASIs: drop the LE bit (0x08) and do the
		 * byte swapping in software below.
		 */
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			/* Loaded big-endian; swap if the original ASI
			 * was little-endian.
			 */
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:
				default:
					BUG();
					break;
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 
/* Emulate the popc (population count) instruction, which some CPUs do
 * not implement in hardware: count the set bits of the source operand
 * (immediate or register) and write the result to rd.  Returns 1 to
 * indicate the instruction was handled.
 */
int handle_popc(u32 insn, struct pt_regs *regs)
{
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	int ret, rd = ((insn >> 25) & 0x1f);
	u64 value;
	                        
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (insn & 0x2000) {
		/* Immediate form: sign-extended simm13 operand. */
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		/* Register form: operand is rs2. */
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	ret = hweight64(value);
	if (rd < 16) {
		/* %g/%o destination: write pt_regs; %g0 is discarded. */
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		/* %l/%i destination: write the register window saved on
		 * the user stack (32- or 64-bit frame).
		 */
		unsigned long fp = regs->u_regs[UREG_FP];

		if (!test_thread_64bit_stack(fp)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(fp + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) extern void do_fpother(struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) extern void do_privact(struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) extern void sun4v_data_access_exception(struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) 					unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) 					unsigned long type_ctx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) int handle_ldf_stq(u32 insn, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) 	unsigned long addr = compute_effective_address(regs, insn, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) 	int freg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) 	struct fpustate *f = FPUSTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) 	int asi = decode_asi(insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) 	int flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) 	save_and_clear_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) 	current_thread_info()->xfsr[0] &= ~0x1c000;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) 	if (insn & 0x200000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450) 		/* STQ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) 		u64 first = 0, second = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) 		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) 		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) 		if (freg & 3) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) 			current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) 			do_fpother(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) 			return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) 		if (current_thread_info()->fpsaved[0] & flag) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) 			first = *(u64 *)&f->regs[freg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) 			second = *(u64 *)&f->regs[freg+2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) 		if (asi < 0x80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) 			do_privact(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) 		switch (asi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469) 		case ASI_P:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) 		case ASI_S: break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) 		case ASI_PL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) 		case ASI_SL: 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) 			{
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) 				/* Need to convert endians */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) 				u64 tmp = __swab64p(&first);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) 				
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) 				first = __swab64p(&second);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) 				second = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) 		default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) 			if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) 				sun4v_data_access_exception(regs, addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) 				spitfire_data_access_exception(regs, 0, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) 		if (put_user (first >> 32, (u32 __user *)addr) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) 		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) 		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) 		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) 			if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) 				sun4v_data_access_exception(regs, addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) 				spitfire_data_access_exception(regs, 0, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) 		    	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) 		/* LDF, LDDF, LDQF */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) 		u32 data[4] __attribute__ ((aligned(8)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) 		int size, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) 		if (asi < 0x80) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) 			do_privact(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) 		} else if (asi > ASI_SNFL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) 			if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) 				sun4v_data_access_exception(regs, addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) 				spitfire_data_access_exception(regs, 0, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) 		switch (insn & 0x180000) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) 		case 0x000000: size = 1; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) 		case 0x100000: size = 4; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) 		default: size = 2; break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) 		if (size == 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) 			freg = (insn >> 25) & 0x1f;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) 			freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) 		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) 		for (i = 0; i < size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) 			data[i] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) 		
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) 		err = get_user (data[0], (u32 __user *) addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) 		if (!err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) 			for (i = 1; i < size; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) 				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) 		if (err && !(asi & 0x2 /* NF */)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) 			if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535) 				sun4v_data_access_exception(regs, addr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) 				spitfire_data_access_exception(regs, 0, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) 			return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) 		if (asi & 0x8) /* Little */ {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) 			u64 tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) 			switch (size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) 			case 1: data[0] = le32_to_cpup(data + 0); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) 			default:*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) 			case 4: tmp = le64_to_cpup((u64 *)(data + 0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) 				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) 				*(u64 *)(data + 2) = tmp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) 		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) 			current_thread_info()->fpsaved[0] = FPRS_FEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) 			current_thread_info()->gsr[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) 		if (!(current_thread_info()->fpsaved[0] & flag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) 			if (freg < 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) 				memset(f->regs, 0, 32*sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) 				memset(f->regs+32, 0, 32*sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) 		memcpy(f->regs + freg, data, size * 4);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) 		current_thread_info()->fpsaved[0] |= flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) 	advance(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) void handle_ld_nf(u32 insn, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) 	int rd = ((insn >> 25) & 0x1f);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) 	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) 	unsigned long *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) 	                        
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) 	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) 	maybe_flush_windows(0, 0, rd, from_kernel);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) 	reg = fetch_reg_addr(rd, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) 	if (from_kernel || rd < 16) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) 		reg[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) 		if ((insn & 0x780000) == 0x180000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583) 			reg[1] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) 	} else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) 		put_user(0, (int __user *) reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) 		if ((insn & 0x780000) == 0x180000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) 			put_user(0, ((int __user *) reg) + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) 		put_user(0, (unsigned long __user *) reg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) 		if ((insn & 0x780000) == 0x180000)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591) 			put_user(0, (unsigned long __user *) reg + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) 	advance(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) 	enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) 	unsigned long pc = regs->tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) 	unsigned long tstate = regs->tstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) 	u32 insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) 	u8 freg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) 	int flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) 	struct fpustate *f = FPUSTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) 	if (tstate & TSTATE_PRIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) 		die_if_kernel("lddfmna from kernel", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) 	if (test_thread_flag(TIF_32BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) 		pc = (u32)pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) 		int asi = decode_asi(insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) 		u32 first, second;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) 		int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) 		if ((asi > ASI_SNFL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) 		    (asi < ASI_P))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) 			goto daex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) 		first = second = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) 		err = get_user(first, (u32 __user *)sfar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) 		if (!err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) 			err = get_user(second, (u32 __user *)(sfar + 4));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) 		if (err) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) 			if (!(asi & 0x2))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) 				goto daex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) 			first = second = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) 		save_and_clear_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) 		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) 		value = (((u64)first) << 32) | second;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) 		if (asi & 0x8) /* Little */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) 			value = __swab64p(&value);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) 		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) 		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) 			current_thread_info()->fpsaved[0] = FPRS_FEF;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) 			current_thread_info()->gsr[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) 		if (!(current_thread_info()->fpsaved[0] & flag)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) 			if (freg < 32)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) 				memset(f->regs, 0, 32*sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) 			else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) 				memset(f->regs+32, 0, 32*sizeof(u32));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) 		*(u64 *)(f->regs + freg) = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) 		current_thread_info()->fpsaved[0] |= flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) daex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) 		if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) 			sun4v_data_access_exception(regs, sfar, sfsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) 			spitfire_data_access_exception(regs, sfsr, sfar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) 	advance(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) 	exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) 	enum ctx_state prev_state = exception_enter();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) 	unsigned long pc = regs->tpc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) 	unsigned long tstate = regs->tstate;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) 	u32 insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) 	u64 value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) 	u8 freg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) 	int flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) 	struct fpustate *f = FPUSTATE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) 	if (tstate & TSTATE_PRIV)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) 		die_if_kernel("stdfmna from kernel", regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) 	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) 	if (test_thread_flag(TIF_32BIT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) 		pc = (u32)pc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) 	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) 		int asi = decode_asi(insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) 		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) 		value = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) 		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) 		if ((asi > ASI_SNFL) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) 		    (asi < ASI_P))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) 			goto daex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) 		save_and_clear_fpu();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) 		if (current_thread_info()->fpsaved[0] & flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) 			value = *(u64 *)&f->regs[freg];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) 		switch (asi) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) 		case ASI_P:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) 		case ASI_S: break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) 		case ASI_PL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) 		case ASI_SL: 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) 			value = __swab64p(&value); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) 		default: goto daex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) 		if (put_user (value >> 32, (u32 __user *) sfar) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) 		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) 			goto daex;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) daex:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) 		if (tlb_type == hypervisor)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) 			sun4v_data_access_exception(regs, sfar, sfsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) 			spitfire_data_access_exception(regs, sfsr, sfar);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) 		goto out;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) 	advance(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) out:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) 	exception_exit(prev_state);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) }