Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5 Plus boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/alignment.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Modifications for ARM processor (c) 1995-2001 Russell King
 *  Thumb alignment fault fixups (c) 2004 MontaVista Software, Inc.
 *  - Adapted from gdb/sim/arm/thumbemu.c -- Thumb instruction emulation.
 *    Copyright (C) 1996, Cygnus Software Technologies Ltd.
 */
#include <linux/moduleparam.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/sched/debug.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <asm/cp15.h>
#include <asm/system_info.h>
#include <asm/unaligned.h>
#include <asm/opcodes.h>

#include "fault.h"
#include "mm.h"

/*
 * 32-bit misaligned trap handler (c) 1998 San Mehat (CCC) -July 1998
 * /proc/sys/debug/alignment, modified and integrated into
 * Linux 2.1 by Russell King
 *
 * Speed optimisations and better fault handling by Russell King.
 *
 * *** NOTE ***
 * This code is not portable to processors with late data abort handling.
 */
#define CODING_BITS(i)	(i & 0x0e000000)
#define COND_BITS(i)	(i & 0xf0000000)

#define LDST_I_BIT(i)	(i & (1 << 26))		/* Immediate constant	*/
#define LDST_P_BIT(i)	(i & (1 << 24))		/* Preindex		*/
#define LDST_U_BIT(i)	(i & (1 << 23))		/* Add offset		*/
#define LDST_W_BIT(i)	(i & (1 << 21))		/* Writeback		*/
#define LDST_L_BIT(i)	(i & (1 << 20))		/* Load			*/

#define LDST_P_EQ_U(i)	((((i) ^ ((i) >> 1)) & (1 << 23)) == 0)

#define LDSTHD_I_BIT(i)	(i & (1 << 22))		/* double/half-word immed */
#define LDM_S_BIT(i)	(i & (1 << 22))		/* write CPSR from SPSR	*/

#define RN_BITS(i)	((i >> 16) & 15)	/* Rn			*/
#define RD_BITS(i)	((i >> 12) & 15)	/* Rd			*/
#define RM_BITS(i)	(i & 15)		/* Rm			*/

#define REGMASK_BITS(i)	(i & 0xffff)
#define OFFSET_BITS(i)	(i & 0x0fff)

#define IS_SHIFT(i)	(i & 0x0ff0)
#define SHIFT_BITS(i)	((i >> 7) & 0x1f)
#define SHIFT_TYPE(i)	(i & 0x60)
#define SHIFT_LSL	0x00
#define SHIFT_LSR	0x20
#define SHIFT_ASR	0x40
#define SHIFT_RORRRX	0x60

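/*
 * Illustrative sketch only: a worked decode of one ARM instruction using
 * the field macros above. The guard macro is hypothetical and never
 * defined, so none of this is built; 0xe59f3004 encodes "ldr r3, [pc, #4]".
 */
#ifdef ALIGNMENT_DECODE_EXAMPLE
static void __maybe_unused alignment_decode_example(void)
{
	BUILD_BUG_ON(RD_BITS(0xe59f3004) != 3);		/* Rd is r3 */
	BUILD_BUG_ON(RN_BITS(0xe59f3004) != 15);	/* Rn is the pc */
	BUILD_BUG_ON(OFFSET_BITS(0xe59f3004) != 4);	/* immediate offset 4 */
	BUILD_BUG_ON(!LDST_L_BIT(0xe59f3004));		/* load, not store */
	BUILD_BUG_ON(!LDST_U_BIT(0xe59f3004));		/* offset is added */
	BUILD_BUG_ON(!LDST_P_BIT(0xe59f3004));		/* pre-indexed */
}
#endif
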
#define BAD_INSTR 	0xdeadc0de

/* Thumb-2 32 bit format per ARMv7 DDI0406A A6.3, either e800h, f000h or f800h */
#define IS_T32(hi16) \
	(((hi16) & 0xe000) == 0xe000 && ((hi16) & 0x1800))

static unsigned long ai_user;
static unsigned long ai_sys;
static void *ai_sys_last_pc;
static unsigned long ai_skipped;
static unsigned long ai_half;
static unsigned long ai_word;
static unsigned long ai_dword;
static unsigned long ai_multi;
static int ai_usermode;
static unsigned long cr_no_alignment;

core_param(alignment, ai_usermode, int, 0600);

#define UM_WARN		(1 << 0)
#define UM_FIXUP	(1 << 1)
#define UM_SIGNAL	(1 << 2)

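/*
 * ai_usermode is a mask of the UM_* bits above: 0 ignores user alignment
 * faults, 1 warns, 2 fixes them up, 3 fixes up and warns, 4 signals the
 * task, 5 signals and warns (see usermode_action[] below). It can be set
 * with the "alignment=" kernel parameter declared above, or at run time
 * through the proc interface below, e.g. (assuming the usual proc entry
 * registered later in this file, /proc/cpu/alignment in mainline):
 *
 *	echo 3 > /proc/cpu/alignment		# fixup + warn
 */
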
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
	return cpu_architecture() >= CPU_ARCH_ARMv6 && get_cr() & CR_U;
}

static int safe_usermode(int new_usermode, bool warn)
{
	/*
	 * ARMv6 and later CPUs can perform unaligned accesses for
	 * most single load and store instructions up to word size.
	 * LDM, STM, LDRD and STRD still need to be handled.
	 *
	 * Ignoring the alignment fault is not an option on these
	 * CPUs since we spin re-faulting the instruction without
	 * making any progress.
	 */
	if (cpu_is_v6_unaligned() && !(new_usermode & (UM_FIXUP | UM_SIGNAL))) {
		new_usermode |= UM_FIXUP;

		if (warn)
			pr_warn("alignment: ignoring faults is unsafe on this CPU.  Defaulting to fixup mode.\n");
	}

	return new_usermode;
}

#ifdef CONFIG_PROC_FS
static const char *usermode_action[] = {
	"ignored",
	"warn",
	"fixup",
	"fixup+warn",
	"signal",
	"signal+warn"
};

static int alignment_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "User:\t\t%lu\n", ai_user);
	seq_printf(m, "System:\t\t%lu (%pS)\n", ai_sys, ai_sys_last_pc);
	seq_printf(m, "Skipped:\t%lu\n", ai_skipped);
	seq_printf(m, "Half:\t\t%lu\n", ai_half);
	seq_printf(m, "Word:\t\t%lu\n", ai_word);
	if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
		seq_printf(m, "DWord:\t\t%lu\n", ai_dword);
	seq_printf(m, "Multi:\t\t%lu\n", ai_multi);
	seq_printf(m, "User faults:\t%i (%s)\n", ai_usermode,
			usermode_action[ai_usermode]);

	return 0;
}

static int alignment_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, alignment_proc_show, NULL);
}

static ssize_t alignment_proc_write(struct file *file, const char __user *buffer,
				    size_t count, loff_t *pos)
{
	char mode;

	if (count > 0) {
		if (get_user(mode, buffer))
			return -EFAULT;
		if (mode >= '0' && mode <= '5')
			ai_usermode = safe_usermode(mode - '0', true);
	}
	return count;
}

static const struct proc_ops alignment_proc_ops = {
	.proc_open	= alignment_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= alignment_proc_write,
};
#endif /* CONFIG_PROC_FS */

union offset_union {
	unsigned long un;
	  signed long sn;
};

#define TYPE_ERROR	0
#define TYPE_FAULT	1
#define TYPE_LDST	2
#define TYPE_DONE	3

#ifdef __ARMEB__
#define BE		1
#define FIRST_BYTE_16	"mov	%1, %1, ror #8\n"
#define FIRST_BYTE_32	"mov	%1, %1, ror #24\n"
#define NEXT_BYTE	"ror #24"
#else
#define BE		0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#define NEXT_BYTE	"lsr #8"
#endif

#define __get8_unaligned_check(ins,val,addr,err)	\
	__asm__(					\
 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
 THUMB(	"	add	%2, %2, #1\n"	)		\
	"2:\n"						\
	"	.pushsection .text.fixup,\"ax\"\n"	\
	"	.align	2\n"				\
	"3:	mov	%0, #1\n"			\
	"	b	2b\n"				\
	"	.popsection\n"				\
	"	.pushsection __ex_table,\"a\"\n"	\
	"	.align	3\n"				\
	"	.long	1b, 3b\n"			\
	"	.popsection\n"				\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))

#define __get16_unaligned_check(ins,val,addr)			\
	do {							\
		unsigned int err = 0, v, a = addr;		\
		__get8_unaligned_check(ins,v,a,err);		\
		val =  v << ((BE) ? 8 : 0);			\
		__get8_unaligned_check(ins,v,a,err);		\
		val |= v << ((BE) ? 0 : 8);			\
		if (err)					\
			goto fault;				\
	} while (0)

#define get16_unaligned_check(val,addr) \
	__get16_unaligned_check("ldrb",val,addr)

#define get16t_unaligned_check(val,addr) \
	__get16_unaligned_check("ldrbt",val,addr)

#define __get32_unaligned_check(ins,val,addr)			\
	do {							\
		unsigned int err = 0, v, a = addr;		\
		__get8_unaligned_check(ins,v,a,err);		\
		val =  v << ((BE) ? 24 :  0);			\
		__get8_unaligned_check(ins,v,a,err);		\
		val |= v << ((BE) ? 16 :  8);			\
		__get8_unaligned_check(ins,v,a,err);		\
		val |= v << ((BE) ?  8 : 16);			\
		__get8_unaligned_check(ins,v,a,err);		\
		val |= v << ((BE) ?  0 : 24);			\
		if (err)					\
			goto fault;				\
	} while (0)

#define get32_unaligned_check(val,addr) \
	__get32_unaligned_check("ldrb",val,addr)

#define get32t_unaligned_check(val,addr) \
	__get32_unaligned_check("ldrbt",val,addr)

#define __put16_unaligned_check(ins,val,addr)			\
	do {							\
		unsigned int err = 0, v = val, a = addr;	\
		__asm__( FIRST_BYTE_16				\
	 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
	 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
	 THUMB(	"	add	%2, %2, #1\n"	)		\
		"	mov	%1, %1, "NEXT_BYTE"\n"		\
		"2:	"ins"	%1, [%2]\n"			\
		"3:\n"						\
		"	.pushsection .text.fixup,\"ax\"\n"	\
		"	.align	2\n"				\
		"4:	mov	%0, #1\n"			\
		"	b	3b\n"				\
		"	.popsection\n"				\
		"	.pushsection __ex_table,\"a\"\n"	\
		"	.align	3\n"				\
		"	.long	1b, 4b\n"			\
		"	.long	2b, 4b\n"			\
		"	.popsection\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)		\
		: "0" (err), "1" (v), "2" (a));			\
		if (err)					\
			goto fault;				\
	} while (0)

#define put16_unaligned_check(val,addr)  \
	__put16_unaligned_check("strb",val,addr)

#define put16t_unaligned_check(val,addr) \
	__put16_unaligned_check("strbt",val,addr)

#define __put32_unaligned_check(ins,val,addr)			\
	do {							\
		unsigned int err = 0, v = val, a = addr;	\
		__asm__( FIRST_BYTE_32				\
	 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
	 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
	 THUMB(	"	add	%2, %2, #1\n"	)		\
		"	mov	%1, %1, "NEXT_BYTE"\n"		\
	 ARM(	"2:	"ins"	%1, [%2], #1\n"	)		\
	 THUMB(	"2:	"ins"	%1, [%2]\n"	)		\
	 THUMB(	"	add	%2, %2, #1\n"	)		\
		"	mov	%1, %1, "NEXT_BYTE"\n"		\
	 ARM(	"3:	"ins"	%1, [%2], #1\n"	)		\
	 THUMB(	"3:	"ins"	%1, [%2]\n"	)		\
	 THUMB(	"	add	%2, %2, #1\n"	)		\
		"	mov	%1, %1, "NEXT_BYTE"\n"		\
		"4:	"ins"	%1, [%2]\n"			\
		"5:\n"						\
		"	.pushsection .text.fixup,\"ax\"\n"	\
		"	.align	2\n"				\
		"6:	mov	%0, #1\n"			\
		"	b	5b\n"				\
		"	.popsection\n"				\
		"	.pushsection __ex_table,\"a\"\n"	\
		"	.align	3\n"				\
		"	.long	1b, 6b\n"			\
		"	.long	2b, 6b\n"			\
		"	.long	3b, 6b\n"			\
		"	.long	4b, 6b\n"			\
		"	.popsection\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)		\
		: "0" (err), "1" (v), "2" (a));			\
		if (err)					\
			goto fault;				\
	} while (0)

#define put32_unaligned_check(val,addr) \
	__put32_unaligned_check("strb", val, addr)

#define put32t_unaligned_check(val,addr) \
	__put32_unaligned_check("strbt", val, addr)

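/*
 * Note on the helpers above: each unaligned access is emulated one byte at
 * a time with ldrb/strb, and every access is covered by an __ex_table
 * fixup that sets err, so a failing access makes the surrounding macro
 * jump to the caller's local "fault" label. The *t_unaligned_check()
 * variants use ldrbt/strbt, which, together with
 * uaccess_save_and_enable(), perform the access with user permissions when
 * fixing up a fault taken in user mode.
 */
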
static void
do_alignment_finish_ldst(unsigned long addr, u32 instr, struct pt_regs *regs, union offset_union offset)
{
	if (!LDST_U_BIT(instr))
		offset.un = -offset.un;

	if (!LDST_P_BIT(instr))
		addr += offset.un;

	if (!LDST_P_BIT(instr) || LDST_W_BIT(instr))
		regs->uregs[RN_BITS(instr)] = addr;
}

static int
do_alignment_ldrhstrh(unsigned long addr, u32 instr, struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);

	ai_half += 1;

	if (user_mode(regs))
		goto user;

	if (LDST_L_BIT(instr)) {
		unsigned long val;
		get16_unaligned_check(val, addr);

		/* signed half-word? */
		if (instr & 0x40)
			val = (signed long)((signed short) val);

		regs->uregs[rd] = val;
	} else
		put16_unaligned_check(regs->uregs[rd], addr);

	return TYPE_LDST;

 user:
	if (LDST_L_BIT(instr)) {
		unsigned long val;
		unsigned int __ua_flags = uaccess_save_and_enable();

		get16t_unaligned_check(val, addr);
		uaccess_restore(__ua_flags);

		/* signed half-word? */
		if (instr & 0x40)
			val = (signed long)((signed short) val);

		regs->uregs[rd] = val;
	} else {
		unsigned int __ua_flags = uaccess_save_and_enable();
		put16t_unaligned_check(regs->uregs[rd], addr);
		uaccess_restore(__ua_flags);
	}

	return TYPE_LDST;

 fault:
	return TYPE_FAULT;
}

static int
do_alignment_ldrdstrd(unsigned long addr, u32 instr, struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);
	unsigned int rd2;
	int load;

	if ((instr & 0xfe000000) == 0xe8000000) {
		/* ARMv7 Thumb-2 32-bit LDRD/STRD */
		rd2 = (instr >> 8) & 0xf;
		load = !!(LDST_L_BIT(instr));
	} else if (((rd & 1) == 1) || (rd == 14))
		goto bad;
	else {
		load = ((instr & 0xf0) == 0xd0);
		rd2 = rd + 1;
	}

	ai_dword += 1;

	if (user_mode(regs))
		goto user;

	if (load) {
		unsigned long val;
		get32_unaligned_check(val, addr);
		regs->uregs[rd] = val;
		get32_unaligned_check(val, addr + 4);
		regs->uregs[rd2] = val;
	} else {
		put32_unaligned_check(regs->uregs[rd], addr);
		put32_unaligned_check(regs->uregs[rd2], addr + 4);
	}

	return TYPE_LDST;

 user:
	if (load) {
		unsigned long val, val2;
		unsigned int __ua_flags = uaccess_save_and_enable();

		get32t_unaligned_check(val, addr);
		get32t_unaligned_check(val2, addr + 4);

		uaccess_restore(__ua_flags);

		regs->uregs[rd] = val;
		regs->uregs[rd2] = val2;
	} else {
		unsigned int __ua_flags = uaccess_save_and_enable();
		put32t_unaligned_check(regs->uregs[rd], addr);
		put32t_unaligned_check(regs->uregs[rd2], addr + 4);
		uaccess_restore(__ua_flags);
	}

	return TYPE_LDST;
 bad:
	return TYPE_ERROR;
 fault:
	return TYPE_FAULT;
}

static int
do_alignment_ldrstr(unsigned long addr, u32 instr, struct pt_regs *regs)
{
	unsigned int rd = RD_BITS(instr);

	ai_word += 1;

	if ((!LDST_P_BIT(instr) && LDST_W_BIT(instr)) || user_mode(regs))
		goto trans;

	if (LDST_L_BIT(instr)) {
		unsigned int val;
		get32_unaligned_check(val, addr);
		regs->uregs[rd] = val;
	} else
		put32_unaligned_check(regs->uregs[rd], addr);
	return TYPE_LDST;

 trans:
	if (LDST_L_BIT(instr)) {
		unsigned int val;
		unsigned int __ua_flags = uaccess_save_and_enable();
		get32t_unaligned_check(val, addr);
		uaccess_restore(__ua_flags);
		regs->uregs[rd] = val;
	} else {
		unsigned int __ua_flags = uaccess_save_and_enable();
		put32t_unaligned_check(regs->uregs[rd], addr);
		uaccess_restore(__ua_flags);
	}
	return TYPE_LDST;

 fault:
	return TYPE_FAULT;
}

/*
 * LDM/STM alignment handler.
 *
 * There are 4 variants of this instruction:
 *
 * B = rn pointer before instruction, A = rn pointer after instruction
 *              ------ increasing address ----->
 *	        |    | r0 | r1 | ... | rx |    |
 * PU = 01             B                    A
 * PU = 11        B                    A
 * PU = 00        A                    B
 * PU = 10             A                    B
 */
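/*
 * Concrete examples of the table above: "ldmia r0!, {r1-r3}" has PU = 01,
 * so the base value (B) is the address of the first transferred word and
 * the written-back value (A) is B + 12; "stmdb sp!, {r1-r3}" (a push) has
 * PU = 10, so the registers land below the original sp (B) and the
 * written-back sp (A) is B - 12.
 */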
static int
do_alignment_ldmstm(unsigned long addr, u32 instr, struct pt_regs *regs)
{
	unsigned int rd, rn, correction, nr_regs, regbits;
	unsigned long eaddr, newaddr;

	if (LDM_S_BIT(instr))
		goto bad;

	correction = 4; /* processor implementation defined */
	regs->ARM_pc += correction;

	ai_multi += 1;

	/* count the number of registers in the mask to be transferred */
	nr_regs = hweight16(REGMASK_BITS(instr)) * 4;

	rn = RN_BITS(instr);
	newaddr = eaddr = regs->uregs[rn];

	if (!LDST_U_BIT(instr))
		nr_regs = -nr_regs;
	newaddr += nr_regs;
	if (!LDST_U_BIT(instr))
		eaddr = newaddr;

	if (LDST_P_EQ_U(instr))	/* U = P */
		eaddr += 4;

	/*
	 * For alignment faults on the ARM922T/ARM920T the MMU makes
	 * the FSR (and hence addr) equal to the updated base address
	 * of the multiple access rather than the restored value.
	 * Switch this message off if we've got an ARM92[02], otherwise
	 * [ls]dm alignment faults are noisy!
	 */
#if !(defined CONFIG_CPU_ARM922T)  && !(defined CONFIG_CPU_ARM920T)
	/*
	 * This is a "hint" - we already have eaddr worked out by the
	 * processor for us.
	 */
	if (addr != eaddr) {
		pr_err("LDMSTM: PC = %08lx, instr = %08x, "
			"addr = %08lx, eaddr = %08lx\n",
			 instruction_pointer(regs), instr, addr, eaddr);
		show_regs(regs);
	}
#endif

	if (user_mode(regs)) {
		unsigned int __ua_flags = uaccess_save_and_enable();
		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
		     regbits >>= 1, rd += 1)
			if (regbits & 1) {
				if (LDST_L_BIT(instr)) {
					unsigned int val;
					get32t_unaligned_check(val, eaddr);
					regs->uregs[rd] = val;
				} else
					put32t_unaligned_check(regs->uregs[rd], eaddr);
				eaddr += 4;
			}
		uaccess_restore(__ua_flags);
	} else {
		for (regbits = REGMASK_BITS(instr), rd = 0; regbits;
		     regbits >>= 1, rd += 1)
			if (regbits & 1) {
				if (LDST_L_BIT(instr)) {
					unsigned int val;
					get32_unaligned_check(val, eaddr);
					regs->uregs[rd] = val;
				} else
					put32_unaligned_check(regs->uregs[rd], eaddr);
				eaddr += 4;
			}
	}

	if (LDST_W_BIT(instr))
		regs->uregs[rn] = newaddr;
	if (!LDST_L_BIT(instr) || !(REGMASK_BITS(instr) & (1 << 15)))
		regs->ARM_pc -= correction;
	return TYPE_DONE;

fault:
	regs->ARM_pc -= correction;
	return TYPE_FAULT;

bad:
	pr_err("Alignment trap: not handling ldm with s-bit set\n");
	return TYPE_ERROR;
}

/*
 * Convert Thumb ld/st instruction forms to equivalent ARM instructions so
 * we can reuse ARM userland alignment fault fixups for Thumb.
 *
 * This implementation was initially based on the algorithm found in
 * gdb/sim/arm/thumbemu.c. It is basically just a code reduction of same
 * to convert only Thumb ld/st instruction forms to equivalent ARM forms.
 *
 * NOTES:
 * 1. Comments below refer to ARM ARM DDI0100E Thumb Instruction sections.
 * 2. If for some reason we're passed a non-ld/st Thumb instruction to
 *    decode, we return 0xdeadc0de. This should never happen under normal
 *    circumstances but if it does, we've got other problems to deal with
 *    elsewhere and we obviously can't fix those problems here.
 */
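/*
 * Worked example of the conversion below: the 16-bit Thumb instruction
 * 0x6833 ("ldr r3, [r6, #0]", format 1) becomes the ARM encoding
 * 0xe5963000 for the same load, which the ARM handlers above can then
 * decode with the usual RD_BITS()/RN_BITS()/OFFSET_BITS() macros.
 */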

static unsigned long
thumb2arm(u16 tinstr)
{
	u32 L = (tinstr & (1<<11)) >> 11;

	switch ((tinstr & 0xf800) >> 11) {
	/* 6.5.1 Format 1: */
	case 0x6000 >> 11:				/* 7.1.52 STR(1) */
	case 0x6800 >> 11:				/* 7.1.26 LDR(1) */
	case 0x7000 >> 11:				/* 7.1.55 STRB(1) */
	case 0x7800 >> 11:				/* 7.1.30 LDRB(1) */
		return 0xe5800000 |
			((tinstr & (1<<12)) << (22-12)) |	/* fixup */
			(L<<20) |				/* L==1? */
			((tinstr & (7<<0)) << (12-0)) |		/* Rd */
			((tinstr & (7<<3)) << (16-3)) |		/* Rn */
			((tinstr & (31<<6)) >>			/* immed_5 */
				(6 - ((tinstr & (1<<12)) ? 0 : 2)));
	case 0x8000 >> 11:				/* 7.1.57 STRH(1) */
	case 0x8800 >> 11:				/* 7.1.32 LDRH(1) */
		return 0xe1c000b0 |
			(L<<20) |				/* L==1? */
			((tinstr & (7<<0)) << (12-0)) |		/* Rd */
			((tinstr & (7<<3)) << (16-3)) |		/* Rn */
			((tinstr & (7<<6)) >> (6-1)) |	 /* immed_5[2:0] */
			((tinstr & (3<<9)) >> (9-8));	 /* immed_5[4:3] */

	/* 6.5.1 Format 2: */
	case 0x5000 >> 11:
	case 0x5800 >> 11:
		{
			static const u32 subset[8] = {
				0xe7800000,		/* 7.1.53 STR(2) */
				0xe18000b0,		/* 7.1.58 STRH(2) */
				0xe7c00000,		/* 7.1.56 STRB(2) */
				0xe19000d0,		/* 7.1.34 LDRSB */
				0xe7900000,		/* 7.1.27 LDR(2) */
				0xe19000b0,		/* 7.1.33 LDRH(2) */
				0xe7d00000,		/* 7.1.31 LDRB(2) */
				0xe19000f0		/* 7.1.35 LDRSH */
			};
			return subset[(tinstr & (7<<9)) >> 9] |
			    ((tinstr & (7<<0)) << (12-0)) |	/* Rd */
			    ((tinstr & (7<<3)) << (16-3)) |	/* Rn */
			    ((tinstr & (7<<6)) >> (6-0));	/* Rm */
		}

	/* 6.5.1 Format 3: */
	case 0x4800 >> 11:				/* 7.1.28 LDR(3) */
		/* NOTE: This case is not technically possible. We're
		 *	 loading 32-bit memory data via PC relative
		 *	 addressing mode. So we can and should eliminate
		 *	 this case. But I'll leave it here for now.
		 */
		return 0xe59f0000 |
		    ((tinstr & (7<<8)) << (12-8)) |		/* Rd */
		    ((tinstr & 255) << (2-0));			/* immed_8 */

	/* 6.5.1 Format 4: */
	case 0x9000 >> 11:				/* 7.1.54 STR(3) */
	case 0x9800 >> 11:				/* 7.1.29 LDR(4) */
		return 0xe58d0000 |
			(L<<20) |				/* L==1? */
			((tinstr & (7<<8)) << (12-8)) |		/* Rd */
			((tinstr & 255) << 2);			/* immed_8 */

	/* 6.6.1 Format 1: */
	case 0xc000 >> 11:				/* 7.1.51 STMIA */
	case 0xc800 >> 11:				/* 7.1.25 LDMIA */
		{
			u32 Rn = (tinstr & (7<<8)) >> 8;
			u32 W = ((L<<Rn) & (tinstr&255)) ? 0 : 1<<21;

			return 0xe8800000 | W | (L<<20) | (Rn<<16) |
				(tinstr&255);
		}

	/* 6.6.1 Format 2: */
	case 0xb000 >> 11:				/* 7.1.48 PUSH */
	case 0xb800 >> 11:				/* 7.1.47 POP */
		if ((tinstr & (3 << 9)) == 0x0400) {
			static const u32 subset[4] = {
				0xe92d0000,	/* STMDB sp!,{registers} */
				0xe92d4000,	/* STMDB sp!,{registers,lr} */
				0xe8bd0000,	/* LDMIA sp!,{registers} */
				0xe8bd8000	/* LDMIA sp!,{registers,pc} */
			};
			return subset[(L<<1) | ((tinstr & (1<<8)) >> 8)] |
			    (tinstr & 255);		/* register_list */
		}
		fallthrough;	/* for illegal instruction case */

	default:
		return BAD_INSTR;
	}
}

/*
 * Convert Thumb-2 32 bit LDM, STM, LDRD, STRD to equivalent instruction
 * handlable by ARM alignment handler, also find the corresponding handler,
 * so that we can reuse ARM userland alignment fault fixups for Thumb.
 *
 * @pinstr: original Thumb-2 instruction; returns new handlable instruction
 * @regs: register context.
 * @poffset: return offset from faulted addr for later writeback
 *
 * NOTES:
 * 1. Comments below refer to ARMv7 DDI0406A Thumb Instruction sections.
 * 2. Register name Rt from ARMv7 is same as Rd from ARMv6 (Rd is Rt)
 */
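/*
 * Worked example: the Thumb-2 encoding 0xe9c42300 ("strd r2, r3, [r4]")
 * matches the 0xe9c0 case below and is handed unchanged to
 * do_alignment_ldrdstrd(), which stores r2 at the faulting address and r3
 * at the faulting address + 4; no offset is recorded because this form
 * does no writeback.
 */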
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  717) static void *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  718) do_alignment_t32_to_handler(u32 *pinstr, struct pt_regs *regs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  719) 			    union offset_union *poffset)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  720) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  721) 	u32 instr = *pinstr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  722) 	u16 tinst1 = (instr >> 16) & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  723) 	u16 tinst2 = instr & 0xffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  724) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  725) 	switch (tinst1 & 0xffe0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  726) 	/* A6.3.5 Load/Store multiple */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  727) 	case 0xe880:		/* STM/STMIA/STMEA,LDM/LDMIA, PUSH/POP T2 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  728) 	case 0xe8a0:		/* ...above writeback version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  729) 	case 0xe900:		/* STMDB/STMFD, LDMDB/LDMEA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  730) 	case 0xe920:		/* ...above writeback version */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  731) 		/* no need offset decision since handler calculates it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  732) 		return do_alignment_ldmstm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  733) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  734) 	case 0xf840:		/* POP/PUSH T3 (single register) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  735) 		if (RN_BITS(instr) == 13 && (tinst2 & 0x09ff) == 0x0904) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  736) 			u32 L = !!(LDST_L_BIT(instr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  737) 			const u32 subset[2] = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  738) 				0xe92d0000,	/* STMDB sp!,{registers} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  739) 				0xe8bd0000,	/* LDMIA sp!,{registers} */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  740) 			};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  741) 			*pinstr = subset[L] | (1<<RD_BITS(instr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  742) 			return do_alignment_ldmstm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  743) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  744) 		/* Else fall through for illegal instruction case */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  745) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  746) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  747) 	/* A6.3.6 Load/store double, STRD/LDRD(immed, lit, reg) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  748) 	case 0xe860:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  749) 	case 0xe960:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  750) 	case 0xe8e0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  751) 	case 0xe9e0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  752) 		poffset->un = (tinst2 & 0xff) << 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  753) 		fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  754) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  755) 	case 0xe940:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  756) 	case 0xe9c0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  757) 		return do_alignment_ldrdstrd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  758) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  759) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  760) 	 * No need to handle load/store instructions up to word size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  761) 	 * since ARMv6 and later CPUs can perform unaligned accesses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  762) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  763) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  764) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  765) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  766) 	return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  767) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  768) 
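/*
 * Fetch the 32-bit ARM opcode at *ip using a fault-tolerant access
 * (get_user() for user PCs, get_kernel_nofault() for kernel PCs) and
 * convert it from in-memory byte order to the canonical opcode order.
 */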
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  769) static int alignment_get_arm(struct pt_regs *regs, u32 *ip, u32 *inst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  771) 	u32 instr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  772) 	int fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  773) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  774) 	if (user_mode(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  775) 		fault = get_user(instr, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  776) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  777) 		fault = get_kernel_nofault(instr, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  778) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  779) 	*inst = __mem_to_opcode_arm(instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 	return fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) static int alignment_get_thumb(struct pt_regs *regs, u16 *ip, u16 *inst)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 	u16 instr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 	int fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	if (user_mode(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 		fault = get_user(instr, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) 		fault = get_kernel_nofault(instr, ip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) 	*inst = __mem_to_opcode_thumb16(instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) 	return fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 	union offset_union offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 	unsigned long instrptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	int (*handler)(unsigned long addr, u32 instr, struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 	unsigned int type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	u32 instr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) 	u16 tinstr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 	int isize = 4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) 	int thumb2_32b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) 	int fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 
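	/*
	 * The abort is taken with IRQs masked; restore them if the
	 * interrupted context had them enabled before doing the fixup.
	 */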
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 	if (interrupts_enabled(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 		local_irq_enable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 	instrptr = instruction_pointer(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) 	if (thumb_mode(regs)) {
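		/* mask off bit 0 so the halfword fetches below are aligned */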
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 		u16 *ptr = (u16 *)(instrptr & ~1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820) 		fault = alignment_get_thumb(regs, ptr, &tinstr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821) 		if (!fault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822) 			if (cpu_architecture() >= CPU_ARCH_ARMv7 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823) 			    IS_T32(tinstr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824) 				/* Thumb-2 32-bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825) 				u16 tinst2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826) 				fault = alignment_get_thumb(regs, ptr + 1, &tinst2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) 				instr = __opcode_thumb32_compose(tinstr, tinst2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) 				thumb2_32b = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 			} else {
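				/*
				 * 16-bit Thumb: translate to the equivalent
				 * ARM encoding so the ARM decoding below can
				 * be reused.
				 */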
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 				isize = 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 				instr = thumb2arm(tinstr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 		fault = alignment_get_arm(regs, (void *)instrptr, &instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) 	if (fault) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) 		type = TYPE_FAULT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 		goto bad_or_fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	if (user_mode(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 		goto user;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 	ai_sys += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 	ai_sys_last_pc = (void *)instruction_pointer(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849)  fixup:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
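	/*
	 * Step the PC past the faulting instruction now; it is rolled
	 * back below if the emulation fails.
	 */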
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) 	regs->ARM_pc += isize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) 	switch (CODING_BITS(instr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	case 0x00000000:	/* 3.13.4 load/store instruction extensions */
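		/*
		 * Halfword/doubleword forms split the 8-bit immediate across
		 * bits [11:8] and [3:0]; register forms take the offset
		 * from Rm.
		 */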
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 		if (LDSTHD_I_BIT(instr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 			offset.un = (instr & 0xf00) >> 4 | (instr & 15);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 			offset.un = regs->uregs[RM_BITS(instr)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) 		if ((instr & 0x000000f0) == 0x000000b0 || /* LDRH, STRH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) 		    (instr & 0x001000f0) == 0x001000f0)   /* LDRSH */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 			handler = do_alignment_ldrhstrh;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) 		else if ((instr & 0x001000f0) == 0x000000d0 || /* LDRD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864) 			 (instr & 0x001000f0) == 0x000000f0)   /* STRD */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865) 			handler = do_alignment_ldrdstrd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866) 		else if ((instr & 0x01f00ff0) == 0x01000090) /* SWP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867) 			goto swp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869) 			goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872) 	case 0x04000000:	/* ldr or str immediate */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873) 		if (COND_BITS(instr) == 0xf0000000) /* NEON VLDn, VSTn */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) 			goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 		offset.un = OFFSET_BITS(instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) 		handler = do_alignment_ldrstr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 	case 0x06000000:	/* ldr or str register */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 		offset.un = regs->uregs[RM_BITS(instr)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		if (IS_SHIFT(instr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 			unsigned int shiftval = SHIFT_BITS(instr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 			switch(SHIFT_TYPE(instr)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 			case SHIFT_LSL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 				offset.un <<= shiftval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			case SHIFT_LSR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 				offset.un >>= shiftval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 			case SHIFT_ASR:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) 				offset.sn >>= shiftval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898) 			case SHIFT_RORRRX:
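				/*
				 * A rotate amount of 0 encodes RRX: rotate
				 * right one bit through the carry flag.
				 * Anything else is a plain ROR.
				 */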
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899) 				if (shiftval == 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900) 					offset.un >>= 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901) 					if (regs->ARM_cpsr & PSR_C_BIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902) 						offset.un |= 1 << 31;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903) 				} else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) 					offset.un = offset.un >> shiftval |
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) 							  offset.un << (32 - shiftval);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) 		handler = do_alignment_ldrstr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912) 	case 0x08000000:	/* ldm or stm, or thumb-2 32bit instruction */
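		/*
		 * For 32-bit Thumb-2 encodings the helper picks the handler
		 * and may rewrite instr into an ARM LDM/STM equivalent.
		 */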
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913) 		if (thumb2_32b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914) 			offset.un = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915) 			handler = do_alignment_t32_to_handler(&instr, regs, &offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) 			offset.un = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 			handler = do_alignment_ldmstm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 		break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922) 	default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926) 	if (!handler)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) 	type = handler(addr, instr, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	if (type == TYPE_ERROR || type == TYPE_FAULT) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 		regs->ARM_pc -= isize;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 		goto bad_or_fault;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
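	/* apply base register writeback (post-index or W-bit forms) */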
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) 	if (type == TYPE_LDST)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936) 		do_alignment_finish_ldst(addr, instr, regs, offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  bad_or_fault:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941) 	if (type == TYPE_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) 		goto bad;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	 * We got a fault - fix it up, or die.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 	do_bad_area(addr, fsr, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  swp:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950) 	pr_err("Alignment trap: not handling swp instruction\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  bad:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) 	 * Oops, we didn't handle the instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	pr_err("Alignment trap: not handling instruction "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) 		"%0*x at [<%08lx>]\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 		isize << 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) 		isize == 2 ? tinstr : instr, instrptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 	ai_skipped += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963)  user:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 	ai_user += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (ai_usermode & UM_WARN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*x "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 		       "Address=0x%08lx FSR 0x%03x\n", current->comm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 			task_pid_nr(current), instrptr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 			isize << 1,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 			isize == 2 ? tinstr : instr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 		        addr, fsr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	if (ai_usermode & UM_FIXUP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 		goto fixup;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 	if (ai_usermode & UM_SIGNAL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 		force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 		 * We're about to disable the alignment trap and return to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 		 * user space.  But if an interrupt occurs before actually
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 		 * reaching user space, then the IRQ vector entry code will
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		 * notice that we were still in kernel space and therefore
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 		 * the alignment trap won't be re-enabled in that case as it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 		 * is presumed to be always on from kernel space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) 		 * Let's prevent that race by disabling interrupts here (they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 		 * are disabled on the way back to user space anyway in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) 		 * entry-common.S) and disable the alignment trap only if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990) 		 * there is no work pending for this thread.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992) 		raw_local_irq_disable();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993) 		if (!(current_thread_info()->flags & _TIF_WORK_MASK))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994) 			set_cr(cr_no_alignment);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999) 
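/*
 * Booting with "noalign" clears the A bit in the control register so
 * no alignment traps are taken at all.
 */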
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) static int __init noalign_setup(char *__unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) 	set_cr(__clear_cr(CR_A));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) 	return 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) __setup("noalign", noalign_setup);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * This needs to be done after sysctl_init, otherwise sys/ will be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  * overwritten.  Actually, this shouldn't be in sys/ at all since
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  * it isn't a sysctl, and it doesn't contain sysctl information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  * We now locate it in /proc/cpu/alignment instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) static int __init alignment_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) #ifdef CONFIG_PROC_FS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 	struct proc_dir_entry *res;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 	res = proc_create("cpu/alignment", S_IWUSR | S_IRUGO, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) 			  &alignment_proc_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	if (!res)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 		return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 
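	/*
	 * If the CPU handles unaligned word accesses in hardware, run with
	 * the alignment trap disabled and pick a user-mode policy that is
	 * still valid without it.
	 */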
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	if (cpu_is_v6_unaligned()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		set_cr(__clear_cr(CR_A));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 		ai_usermode = safe_usermode(ai_usermode, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 
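	/*
	 * Snapshot of the control register with the A bit cleared, written
	 * when the trap is temporarily turned off on return to user space.
	 */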
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 	cr_no_alignment = get_cr() & ~CR_A;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 	hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 			"alignment exception");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 	 * ARMv6K and ARMv7 use fault status 3 (0b00011) as Access Flag section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 	 * fault, not as alignment error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 	 *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 	 * TODO: handle ARMv6K properly. Runtime check for 'K' extension is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 	 * needed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 	if (cpu_architecture() <= CPU_ARCH_ARMv6) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 		hook_fault_code(3, do_alignment, SIGBUS, BUS_ADRALN,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 				"alignment exception");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) fs_initcall(alignment_init);