/*
 * linux/arch/nios2/kernel/misaligned.c
 *
 * basic emulation for misaligned accesses on the Nios II CPU
 * modelled after the version for arm in arm/alignment.c
 *
 * Brad Parker <brad@heeltoe.com>
 * Copyright (C) 2010 Ambient Corporation
 * Copyright (c) 2010 Altera Corporation, San Jose, California, USA.
 * Copyright (c) 2010 Arrow Electronics, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of
 * this archive for more details.
 */

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

#include <asm/traps.h>
#include <asm/unaligned.h>

/* instructions we emulate */
#define INST_LDHU	0x0b
#define INST_STH	0x0d
#define INST_LDH	0x0f
#define INST_STW	0x15
#define INST_LDW	0x17
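
/*
 * All of the above are Nios II I-type load/store instructions; the decode
 * in handle_unaligned_c() below extracts their fields as:
 *
 *   bits 31..27  A      base register
 *   bits 26..22  B      data (source/destination) register
 *   bits 21..6   IMM16  signed byte offset
 *   bits  5..0   OP     opcode (one of the INST_* values above)
 *
 * e.g. a misaligned "ldw rB, imm16(rA)" is emulated by reading four bytes
 * from rA + imm16 and writing the assembled word into the saved copy of rB.
 */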
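/*
 * ma_usermode selects how misaligned accesses are handled:
 *   UM_WARN   - log user-mode misaligned accesses
 *   UM_FIXUP  - emulate (fix up) user-mode misaligned accesses
 *   UM_SIGNAL - deliver SIGBUS instead of skipping past the faulting access
 *   KM_WARN   - log misaligned accesses performed by the kernel
 * Kernel-mode accesses are always fixed up.
 */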
static unsigned int ma_usermode;
#define UM_WARN 0x01
#define UM_FIXUP 0x02
#define UM_SIGNAL 0x04
#define KM_WARN 0x08

/* see arch/nios2/include/asm/ptrace.h */
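/*
 * sys_stack_frame_reg_offset[i] is the general-purpose register number saved
 * in word i of the exception frame: words 0-15 index into struct pt_regs,
 * words 16-31 into the struct switch_stack saved below it.  Slots that hold
 * something other than one of the registers mapped here are marked 0.
 */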
static u8 sys_stack_frame_reg_offset[] = {
	/* struct pt_regs */
	8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 0,
	/* struct switch_stack */
	16, 17, 18, 19, 20, 21, 22, 23, 0, 0, 0, 0, 0, 0, 0, 0
};

static int reg_offsets[32];

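/*
 * Read/write the saved value of general-purpose register 'reg' in the
 * exception frame.  reg_offsets[] is filled in at init time and gives each
 * register's byte offset from the start of struct pt_regs; negative offsets
 * reach into the struct switch_stack saved just below it.
 */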
static inline u32 get_reg_val(struct pt_regs *fp, int reg)
{
	u8 *p = ((u8 *)fp) + reg_offsets[reg];
	return *(u32 *)p;
}

static inline void put_reg_val(struct pt_regs *fp, int reg, u32 val)
{
	u8 *p = ((u8 *)fp) + reg_offsets[reg];
	*(u32 *)p = val;
}

/*
 * (mis)alignment handler
 */
asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
{
	u32 isn, addr, val;
	int in_kernel;
	u8 a, b, d0, d1, d2, d3;
	s16 imm16;
	unsigned int fault;

	/* back up one instruction */
	fp->ea -= 4;

	if (fixup_exception(fp)) {
		return;
	}

	in_kernel = !user_mode(fp);

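	/*
	 * ea now points at the faulting load/store itself; fetch the
	 * instruction word so the access can be decoded and emulated.
	 */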
	isn = *(unsigned long *)(fp->ea);

	fault = 0;

	/* do fixup if in kernel or mode turned on */
	if (in_kernel || (ma_usermode & UM_FIXUP)) {
		/* decompose instruction */
		a = (isn >> 27) & 0x1f;
		b = (isn >> 22) & 0x1f;
		imm16 = (isn >> 6) & 0xffff;
		addr = get_reg_val(fp, a) + imm16;

		/* do fixup to saved registers */
		switch (isn & 0x3f) {
		case INST_LDHU:
			fault |= __get_user(d0, (u8 *)(addr+0));
			fault |= __get_user(d1, (u8 *)(addr+1));
			val = (d1 << 8) | d0;
			put_reg_val(fp, b, val);
			break;
		case INST_STH:
			val = get_reg_val(fp, b);
			d1 = val >> 8;
			d0 = val >> 0;
			if (in_kernel) {
				*(u8 *)(addr+0) = d0;
				*(u8 *)(addr+1) = d1;
			} else {
				fault |= __put_user(d0, (u8 *)(addr+0));
				fault |= __put_user(d1, (u8 *)(addr+1));
			}
			break;
		case INST_LDH:
			fault |= __get_user(d0, (u8 *)(addr+0));
			fault |= __get_user(d1, (u8 *)(addr+1));
			val = (short)((d1 << 8) | d0);
			put_reg_val(fp, b, val);
			break;
		case INST_STW:
			val = get_reg_val(fp, b);
			d3 = val >> 24;
			d2 = val >> 16;
			d1 = val >> 8;
			d0 = val >> 0;
			if (in_kernel) {
				*(u8 *)(addr+0) = d0;
				*(u8 *)(addr+1) = d1;
				*(u8 *)(addr+2) = d2;
				*(u8 *)(addr+3) = d3;
			} else {
				fault |= __put_user(d0, (u8 *)(addr+0));
				fault |= __put_user(d1, (u8 *)(addr+1));
				fault |= __put_user(d2, (u8 *)(addr+2));
				fault |= __put_user(d3, (u8 *)(addr+3));
			}
			break;
		case INST_LDW:
			fault |= __get_user(d0, (u8 *)(addr+0));
			fault |= __get_user(d1, (u8 *)(addr+1));
			fault |= __get_user(d2, (u8 *)(addr+2));
			fault |= __get_user(d3, (u8 *)(addr+3));
			val = (d3 << 24) | (d2 << 16) | (d1 << 8) | d0;
			put_reg_val(fp, b, val);
			break;
		}
	}

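	/*
	 * The faulting data address is latched in the BADADDR control
	 * register; the cause code arrives shifted up by two bits, so
	 * shift it down for the diagnostics below.
	 */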
	addr = RDCTL(CTL_BADADDR);
	cause >>= 2;

	if (fault) {
		if (in_kernel) {
			pr_err("fault during kernel misaligned fixup @ %#lx; addr 0x%08x; isn=0x%08x\n",
				fp->ea, (unsigned int)addr,
				(unsigned int)isn);
		} else {
			pr_err("fault during user misaligned fixup @ %#lx; isn=%08x addr=0x%08x sp=0x%08lx pid=%d\n",
				fp->ea,
				(unsigned int)isn, addr, fp->sp,
				current->pid);

			_exception(SIGSEGV, fp, SEGV_MAPERR, fp->ea);
			return;
		}
	}

	/*
	 * kernel mode -
	 * note exception and skip bad instruction (return)
	 */
	if (in_kernel) {
		fp->ea += 4;

		if (ma_usermode & KM_WARN) {
			pr_err("kernel unaligned access @ %#lx; BADADDR 0x%08x; cause=%d, isn=0x%08x\n",
				fp->ea,
				(unsigned int)addr, cause,
				(unsigned int)isn);
			/* show_regs(fp); */
		}

		return;
	}

	/*
	 * user mode -
	 * possibly warn,
	 * possibly send SIGBUS signal to process
	 */
	if (ma_usermode & UM_WARN) {
		pr_err("user unaligned access @ %#lx; isn=0x%08lx ea=0x%08lx ra=0x%08lx sp=0x%08lx\n",
			(unsigned long)addr, (unsigned long)isn,
			fp->ea, fp->ra, fp->sp);
	}

	if (ma_usermode & UM_SIGNAL)
		_exception(SIGBUS, fp, BUS_ADRALN, fp->ea);
	else
		fp->ea += 4;	/* else advance */
}

static void __init misaligned_calc_reg_offsets(void)
{
	int i, r, offset;

	/* pre-calc offsets of registers on sys call stack frame */
	offset = 0;

	/* struct pt_regs */
	for (i = 0; i < 16; i++) {
		r = sys_stack_frame_reg_offset[i];
		reg_offsets[r] = offset;
		offset += 4;
	}

	/* struct switch_stack */
	offset = -sizeof(struct switch_stack);
	for (i = 16; i < 32; i++) {
		r = sys_stack_frame_reg_offset[i];
		reg_offsets[r] = offset;
		offset += 4;
	}
}
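
/*
 * For example, after the loops above reg_offsets[8] is 0 (r8 is saved in the
 * first word of struct pt_regs) and reg_offsets[16] is
 * -sizeof(struct switch_stack) (r16 is the first word of the switch_stack
 * saved below pt_regs).
 */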

static int __init misaligned_init(void)
{
	/* default mode - silently fix up user accesses, warn about kernel ones */
	ma_usermode = UM_FIXUP | KM_WARN;

	misaligned_calc_reg_offsets();

	return 0;
}

fs_initcall(misaligned_init);