// SPDX-License-Identifier: GPL-2.0
/*
 * Architecture-specific unaligned trap handling.
 *
 * Copyright (C) 1999-2002, 2004 Hewlett-Packard Co
 *	Stephane Eranian <eranian@hpl.hp.com>
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 2002/12/09	Fix rotating register handling (off-by-1 error, missing fr-rotation).  Fix
 *		get_rse_reg() to not leak kernel bits to user-level (reading an out-of-frame
 *		stacked register returns an undefined value; it does NOT trigger a
 *		"rsvd register fault").
 * 2001/10/11	Fix unaligned access to rotating registers in s/w pipelined loops.
 * 2001/08/13	Correct size of extended floats (float_fsz) from 16 to 10 bytes.
 * 2001/01/17	Add support for emulation of unaligned kernel accesses.
 */
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/tty.h>
#include <linux/extable.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>

#include <asm/intrinsics.h>
#include <asm/processor.h>
#include <asm/rse.h>
#include <asm/exception.h>
#include <asm/unaligned.h>

extern int die_if_kernel(char *str, struct pt_regs *regs, long err);

#undef DEBUG_UNALIGNED_TRAP

#ifdef DEBUG_UNALIGNED_TRAP
# define DPRINT(a...)	do { printk("%s %u: ", __func__, __LINE__); printk (a); } while (0)
# define DDUMP(str,vp,len)	dump(str, vp, len)

static void
dump (const char *str, void *vp, size_t len)
{
	unsigned char *cp = vp;
	int i;

	printk("%s", str);
	for (i = 0; i < len; ++i)
		printk (" %02x", *cp++);
	printk("\n");
}
#else
# define DPRINT(a...)
# define DDUMP(str,vp,len)
#endif

#define IA64_FIRST_STACKED_GR	32
#define IA64_FIRST_ROTATING_FR	32
#define SIGN_EXT9		0xffffffffffffff00ul

/*
 * sysctl settable hook which tells the kernel whether to honor the
 * IA64_THREAD_UAC_NOPRINT prctl.  Because this is user settable, we want
 * to allow the super user to enable/disable this for security reasons
 * (i.e. don't allow attacker to fill up logs with unaligned accesses).
 */
int no_unaligned_warning;
int unaligned_dump_stack;

/*
 * For M-unit:
 *
 *  opcode |   m  |   x6    |
 * --------|------|---------|
 * [40-37] | [36] | [35:30] |
 * --------|------|---------|
 *     4   |   1  |    6    | = 11 bits
 * --------------------------
 * However bits [31:30] are not directly useful to distinguish between
 * load/store so we can use [35:32] instead, which gives the following
 * mask ([40:32]) using 9 bits.  The 'e' in the mask value (0x1ef) comes from
 * the fact that we defer checking the m-bit until later in the load/store emulation.
 */
#define IA64_OPCODE_MASK	0x1ef
#define IA64_OPCODE_SHIFT	32
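
/*
 * Illustrative note (not from the original source): the 9-bit value compared
 * against the *_OP constants below is assumed to be
 * (slot >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK, i.e. bits [40:32] with the
 * m-bit masked out.  For example, ld8 r1=[r3] (op=4, x6_op=0) yields
 * (4 << 5) | 0 = 0x080 = LD_OP; the access size is carried separately in
 * x6_sz (bits [31:30]).
 */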

/*
 * Table C-28 Integer Load/Store
 *
 * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
 *
 * ld8.fill, st8.fill MUST be aligned because the RNATs are based on
 * the address (bits [8:3]), so we must fail.
 */
#define LD_OP            0x080
#define LDS_OP           0x081
#define LDA_OP           0x082
#define LDSA_OP          0x083
#define LDBIAS_OP        0x084
#define LDACQ_OP         0x085
/* 0x086, 0x087 are not relevant */
#define LDCCLR_OP        0x088
#define LDCNC_OP         0x089
#define LDCCLRACQ_OP     0x08a
#define ST_OP            0x08c
#define STREL_OP         0x08d
/* 0x08e,0x8f are not relevant */

/*
 * Table C-29 Integer Load +Reg
 *
 * we use the ld->m (bit [36:36]) field to determine whether or not we have
 * a load/store of this form.
 */

/*
 * Table C-30 Integer Load/Store +Imm
 *
 * We ignore [35:32]= 0x6, 0x7, 0xE, 0xF
 *
 * ld8.fill, st8.fill must be aligned because the NaT bits are based on
 * the address, so we must fail and the program must be fixed.
 */
#define LD_IMM_OP            0x0a0
#define LDS_IMM_OP           0x0a1
#define LDA_IMM_OP           0x0a2
#define LDSA_IMM_OP          0x0a3
#define LDBIAS_IMM_OP        0x0a4
#define LDACQ_IMM_OP         0x0a5
/* 0x0a6, 0xa7 are not relevant */
#define LDCCLR_IMM_OP        0x0a8
#define LDCNC_IMM_OP         0x0a9
#define LDCCLRACQ_IMM_OP     0x0aa
#define ST_IMM_OP            0x0ac
#define STREL_IMM_OP         0x0ad
/* 0x0ae,0xaf are not relevant */

/*
 * Table C-32 Floating-point Load/Store
 */
#define LDF_OP           0x0c0
#define LDFS_OP          0x0c1
#define LDFA_OP          0x0c2
#define LDFSA_OP         0x0c3
/* 0x0c6 is irrelevant */
#define LDFCCLR_OP       0x0c8
#define LDFCNC_OP        0x0c9
/* 0x0cb is irrelevant */
#define STF_OP           0x0cc

/*
 * Table C-33 Floating-point Load +Reg
 *
 * we use the ld->m (bit [36:36]) field to determine whether or not we have
 * a load/store of this form.
 */

/*
 * Table C-34 Floating-point Load/Store +Imm
 */
#define LDF_IMM_OP       0x0e0
#define LDFS_IMM_OP      0x0e1
#define LDFA_IMM_OP      0x0e2
#define LDFSA_IMM_OP     0x0e3
/* 0x0e6 is irrelevant */
#define LDFCCLR_IMM_OP   0x0e8
#define LDFCNC_IMM_OP    0x0e9
#define STF_IMM_OP       0x0ec

typedef struct {
	unsigned long	 qp:6;		/* [0:5]   */
	unsigned long	 r1:7;		/* [6:12]  */
	unsigned long	 imm:7;		/* [13:19] */
	unsigned long	 r3:7;		/* [20:26] */
	unsigned long	 x:1;		/* [27:27] */
	unsigned long	 hint:2;	/* [28:29] */
	unsigned long	 x6_sz:2;	/* [30:31] */
	unsigned long	 x6_op:4;	/* [32:35], x6 = x6_sz|x6_op */
	unsigned long	 m:1;		/* [36:36] */
	unsigned long	 op:4;		/* [37:40] */
	unsigned long	 pad:23;	/* [41:63] */
} load_store_t;
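
/*
 * Illustrative note (not from the original source): the instruction slot is
 * viewed through this bitfield layout, so on a little-endian IA-64 build the
 * fields above are assumed to be equivalent to explicit shifts of the raw
 * 41-bit slot value "insn", e.g.
 *
 *	qp    = insn & 0x3f;		r1    = (insn >>  6) & 0x7f;
 *	imm   = (insn >> 13) & 0x7f;	r3    = (insn >> 20) & 0x7f;
 *	x6_sz = (insn >> 30) & 0x3;	x6_op = (insn >> 32) & 0xf;
 *	m     = (insn >> 36) & 0x1;	op    = (insn >> 37) & 0xf;
 *
 * This is only a reading aid, not code used by the handler.
 */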


typedef enum {
	UPD_IMMEDIATE,	/* ldXZ r1=[r3],imm(9) */
	UPD_REG		/* ldXZ r1=[r3],r2     */
} update_t;

/*
 * We use tables to keep track of the offsets of registers in the saved state.
 * This way we save having big switch/case statements.
 *
 * We use bit 0 to indicate switch_stack or pt_regs.
 * The offset is simply shifted by 1 bit.
 * A 2-byte value should be enough to hold any kind of offset
 *
 * In case the calling convention changes (and thus pt_regs/switch_stack)
 * simply use RSW instead of RPT or vice-versa.
 */

#define RPO(x)	((size_t) &((struct pt_regs *)0)->x)
#define RSO(x)	((size_t) &((struct switch_stack *)0)->x)

#define RPT(x)		(RPO(x) << 1)
#define RSW(x)		(1| RSO(x)<<1)

#define GR_OFFS(x)	(gr_info[x]>>1)
#define GR_IN_SW(x)	(gr_info[x] & 0x1)

#define FR_OFFS(x)	(fr_info[x]>>1)
#define FR_IN_SW(x)	(fr_info[x] & 0x1)
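
/*
 * Illustrative note (not from the original source): RPT()/RSW() pack "where"
 * and "offset" into one u16.  For example, gr_info[8] = RPT(r8) is
 * offsetof(struct pt_regs, r8) << 1 with bit 0 clear (the value lives in
 * pt_regs), while fr_info[2] = RSW(f2) is offsetof(struct switch_stack, f2)
 * << 1 with bit 0 set (the value lives in switch_stack).  GR_OFFS()/FR_OFFS()
 * undo the shift and GR_IN_SW()/FR_IN_SW() test the tag bit.
 */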

static u16 gr_info[32]={
	0,			/* r0 is read-only : WE SHOULD NEVER GET THIS */

	RPT(r1), RPT(r2), RPT(r3),

	RSW(r4), RSW(r5), RSW(r6), RSW(r7),

	RPT(r8), RPT(r9), RPT(r10), RPT(r11),
	RPT(r12), RPT(r13), RPT(r14), RPT(r15),

	RPT(r16), RPT(r17), RPT(r18), RPT(r19),
	RPT(r20), RPT(r21), RPT(r22), RPT(r23),
	RPT(r24), RPT(r25), RPT(r26), RPT(r27),
	RPT(r28), RPT(r29), RPT(r30), RPT(r31)
};

static u16 fr_info[32]={
	0,			/* constant : WE SHOULD NEVER GET THIS */
	0,			/* constant : WE SHOULD NEVER GET THIS */

	RSW(f2), RSW(f3), RSW(f4), RSW(f5),

	RPT(f6), RPT(f7), RPT(f8), RPT(f9),
	RPT(f10), RPT(f11),

	RSW(f12), RSW(f13), RSW(f14),
	RSW(f15), RSW(f16), RSW(f17), RSW(f18), RSW(f19),
	RSW(f20), RSW(f21), RSW(f22), RSW(f23), RSW(f24),
	RSW(f25), RSW(f26), RSW(f27), RSW(f28), RSW(f29),
	RSW(f30), RSW(f31)
};

/* Invalidate ALAT entry for integer register REGNO. */
static void
invala_gr (int regno)
{
# define F(reg)	case reg: ia64_invala_gr(reg); break

	switch (regno) {
		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
		F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
		F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
		F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
		F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
		F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
		F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
		F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
		F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
		F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
		F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
		F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
		F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
		F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
		F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
		F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
	}
# undef F
}

/* Invalidate ALAT entry for floating-point register REGNO. */
static void
invala_fr (int regno)
{
# define F(reg)	case reg: ia64_invala_fr(reg); break

	switch (regno) {
		F(  0); F(  1); F(  2); F(  3); F(  4); F(  5); F(  6); F(  7);
		F(  8); F(  9); F( 10); F( 11); F( 12); F( 13); F( 14); F( 15);
		F( 16); F( 17); F( 18); F( 19); F( 20); F( 21); F( 22); F( 23);
		F( 24); F( 25); F( 26); F( 27); F( 28); F( 29); F( 30); F( 31);
		F( 32); F( 33); F( 34); F( 35); F( 36); F( 37); F( 38); F( 39);
		F( 40); F( 41); F( 42); F( 43); F( 44); F( 45); F( 46); F( 47);
		F( 48); F( 49); F( 50); F( 51); F( 52); F( 53); F( 54); F( 55);
		F( 56); F( 57); F( 58); F( 59); F( 60); F( 61); F( 62); F( 63);
		F( 64); F( 65); F( 66); F( 67); F( 68); F( 69); F( 70); F( 71);
		F( 72); F( 73); F( 74); F( 75); F( 76); F( 77); F( 78); F( 79);
		F( 80); F( 81); F( 82); F( 83); F( 84); F( 85); F( 86); F( 87);
		F( 88); F( 89); F( 90); F( 91); F( 92); F( 93); F( 94); F( 95);
		F( 96); F( 97); F( 98); F( 99); F(100); F(101); F(102); F(103);
		F(104); F(105); F(106); F(107); F(108); F(109); F(110); F(111);
		F(112); F(113); F(114); F(115); F(116); F(117); F(118); F(119);
		F(120); F(121); F(122); F(123); F(124); F(125); F(126); F(127);
	}
# undef F
}

static inline unsigned long
rotate_reg (unsigned long sor, unsigned long rrb, unsigned long reg)
{
	reg += rrb;
	if (reg >= sor)
		reg -= sor;
	return reg;
}
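
/*
 * Illustrative note (not from the original source): with a rotating-region
 * size of sor=16 and rrb=2, rotate_reg() maps logical register index 13 to
 * physical index 15, and index 15 wraps around to 1 (15 + 2 - 16).
 */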

static void
set_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long val, int nat)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long *bsp, *bspstore, *addr, *rnat_addr, *ubs_end;
	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
	unsigned long rnats, nat_mask;
	unsigned long on_kbs;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;
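
	/*
	 * Illustrative note (not from the original source): cr.ifs describes the
	 * current frame: sof in bits [6:0], sol in [13:7], sor in [17:14] (in
	 * units of 8 registers) and rrb.gr in [24:18].  E.g. with sof=96, a sor
	 * field of 2 (16 rotating registers) and rrb.gr=5, a write to r34 gives
	 * ridx=2, which rotate_reg() maps to physical stacked slot 7.
	 */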

	if (ridx >= sof) {
		/* this should never happen, as the "rsvd register fault" has higher priority */
		DPRINT("ignoring write to r%lu; only %lu registers are allocated!\n", r1, sof);
		return;
	}

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
	       r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);

	on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
	addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
	if (addr >= kbs) {
		/* the register is on the kernel backing store: easy... */
		rnat_addr = ia64_rse_rnat_addr(addr);
		if ((unsigned long) rnat_addr >= sw->ar_bspstore)
			rnat_addr = &sw->ar_rnat;
		nat_mask = 1UL << ia64_rse_slot_num(addr);

		*addr = val;
		if (nat)
			*rnat_addr |= nat_mask;
		else
			*rnat_addr &= ~nat_mask;
		return;
	}

	if (!user_stack(current, regs)) {
		DPRINT("ignoring kernel write to r%lu; register isn't on the kernel RBS!", r1);
		return;
	}

	bspstore = (unsigned long *)regs->ar_bspstore;
	ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
	bsp     = ia64_rse_skip_regs(ubs_end, -sof);
	addr    = ia64_rse_skip_regs(bsp, ridx);

	DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);

	ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);

	rnat_addr = ia64_rse_rnat_addr(addr);

	ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
	DPRINT("rnat @%p = 0x%lx nat=%d old nat=%ld\n",
	       (void *) rnat_addr, rnats, nat, (rnats >> ia64_rse_slot_num(addr)) & 1);

	nat_mask = 1UL << ia64_rse_slot_num(addr);
	if (nat)
		rnats |= nat_mask;
	else
		rnats &= ~nat_mask;
	ia64_poke(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, rnats);

	DPRINT("rnat changed to @%p = 0x%lx\n", (void *) rnat_addr, rnats);
}


static void
get_rse_reg (struct pt_regs *regs, unsigned long r1, unsigned long *val, int *nat)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long *bsp, *addr, *rnat_addr, *ubs_end, *bspstore;
	unsigned long *kbs = (void *) current + IA64_RBS_OFFSET;
	unsigned long rnats, nat_mask;
	unsigned long on_kbs;
	long sof = (regs->cr_ifs) & 0x7f;
	long sor = 8 * ((regs->cr_ifs >> 14) & 0xf);
	long rrb_gr = (regs->cr_ifs >> 18) & 0x7f;
	long ridx = r1 - 32;

	if (ridx >= sof) {
		/* read of out-of-frame register returns an undefined value; 0 in our case. */
		DPRINT("ignoring read from r%lu; only %lu registers are allocated!\n", r1, sof);
		goto fail;
	}

	if (ridx < sor)
		ridx = rotate_reg(sor, rrb_gr, ridx);

	DPRINT("r%lu, sw.bspstore=%lx pt.bspstore=%lx sof=%ld sol=%ld ridx=%ld\n",
	       r1, sw->ar_bspstore, regs->ar_bspstore, sof, (regs->cr_ifs >> 7) & 0x7f, ridx);

	on_kbs = ia64_rse_num_regs(kbs, (unsigned long *) sw->ar_bspstore);
	addr = ia64_rse_skip_regs((unsigned long *) sw->ar_bspstore, -sof + ridx);
	if (addr >= kbs) {
		/* the register is on the kernel backing store: easy... */
		*val = *addr;
		if (nat) {
			rnat_addr = ia64_rse_rnat_addr(addr);
			if ((unsigned long) rnat_addr >= sw->ar_bspstore)
				rnat_addr = &sw->ar_rnat;
			nat_mask = 1UL << ia64_rse_slot_num(addr);
			*nat = (*rnat_addr & nat_mask) != 0;
		}
		return;
	}

	if (!user_stack(current, regs)) {
		DPRINT("ignoring kernel read of r%lu; register isn't on the RBS!", r1);
		goto fail;
	}

	bspstore = (unsigned long *)regs->ar_bspstore;
	ubs_end = ia64_rse_skip_regs(bspstore, on_kbs);
	bsp     = ia64_rse_skip_regs(ubs_end, -sof);
	addr    = ia64_rse_skip_regs(bsp, ridx);

	DPRINT("ubs_end=%p bsp=%p addr=%p\n", (void *) ubs_end, (void *) bsp, (void *) addr);

	ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) addr, val);

	if (nat) {
		rnat_addr = ia64_rse_rnat_addr(addr);
		nat_mask = 1UL << ia64_rse_slot_num(addr);

		DPRINT("rnat @%p = 0x%lx\n", (void *) rnat_addr, rnats);

		ia64_peek(current, sw, (unsigned long) ubs_end, (unsigned long) rnat_addr, &rnats);
		*nat = (rnats & nat_mask) != 0;
	}
	return;

  fail:
	*val = 0;
	if (nat)
		*nat = 0;
	return;
}


static void
setreg (unsigned long regnum, unsigned long val, int nat, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long addr;
	unsigned long bitmask;
	unsigned long *unat;

	/*
	 * First takes care of stacked registers
	 */
	if (regnum >= IA64_FIRST_STACKED_GR) {
		set_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * Using r0 as a target raises a General Exception fault which has higher priority
	 * than the Unaligned Reference fault.
	 */

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	if (GR_IN_SW(regnum)) {
		addr = (unsigned long)sw;
		unat = &sw->ar_unat;
	} else {
		addr = (unsigned long)regs;
		unat = &sw->caller_unat;
	}
	DPRINT("tmp_base=%lx switch_stack=%s offset=%d\n",
	       addr, unat==&sw->ar_unat ? "yes":"no", GR_OFFS(regnum));
	/*
	 * add offset from base of struct
	 * and do it !
	 */
	addr += GR_OFFS(regnum);

	*(unsigned long *)addr = val;

	/*
	 * We need to clear the corresponding UNAT bit to fully emulate the load
	 * UNAT bit_pos = GR[r3]{8:3} from EAS-2.4
	 */
	bitmask   = 1UL << (addr >> 3 & 0x3f);
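	/*
	 * Illustrative note (not from the original source): the UNAT bit index
	 * is taken from address bits {8:3}.  E.g. if the register slot we just
	 * wrote sits at a (hypothetical) address ending in ...0x1c8, then
	 * (addr >> 3) & 0x3f is 0x39, so bit 57 of the relevant UNAT word
	 * tracks its NaT state.
	 */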
	DPRINT("*0x%lx=0x%lx NaT=%d prev_unat @%p=%lx\n", addr, val, nat, (void *) unat, *unat);
	if (nat) {
		*unat |= bitmask;
	} else {
		*unat &= ~bitmask;
	}
	DPRINT("*0x%lx=0x%lx NaT=%d new unat: %p=%lx\n", addr, val, nat, (void *) unat,*unat);
}

/*
 * Return the (rotated) index for floating point register REGNUM (REGNUM must be in
 * the range 32-127; the result is in the range 0-95).
 */
static inline unsigned long
fph_index (struct pt_regs *regs, long regnum)
{
	unsigned long rrb_fr = (regs->cr_ifs >> 25) & 0x7f;
	return rotate_reg(96, rrb_fr, (regnum - IA64_FIRST_ROTATING_FR));
}
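
/*
 * Illustrative note (not from the original source): the entire f32-f127
 * partition rotates, so sor is always 96 here.  E.g. with rrb.fr=10, a
 * reference to f34 yields regnum - 32 = 2 and rotate_reg(96, 10, 2) = 12,
 * i.e. the value is accessed via current->thread.fph[12].
 */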

static void
setfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *)regs - 1;
	unsigned long addr;

	/*
	 * From EAS-2.5: FPDisableFault has higher priority than Unaligned
	 * Fault. Thus, when we get here, we know the partition is enabled.
	 * To update f32-f127, there are three choices:
	 *
	 *	(1) save f32-f127 to thread.fph and update the values there
	 *	(2) use a gigantic switch statement to directly access the registers
	 *	(3) generate code on the fly to update the desired register
	 *
	 * For now, we are using approach (1).
	 */
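	/*
	 * Illustrative note (not from the original source): ia64_sync_fph() is
	 * assumed to both save the live f32-f127 partition to thread.fph and
	 * disable it, so the value patched into thread.fph below is what gets
	 * reloaded later; the read path in getfpreg() only needs ia64_flush_fph().
	 */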
	if (regnum >= IA64_FIRST_ROTATING_FR) {
		ia64_sync_fph(current);
		current->thread.fph[fph_index(regs, regnum)] = *fpval;
	} else {
		/*
		 * pt_regs or switch_stack ?
		 */
		if (FR_IN_SW(regnum)) {
			addr = (unsigned long)sw;
		} else {
			addr = (unsigned long)regs;
		}

		DPRINT("tmp_base=%lx offset=%d\n", addr, FR_OFFS(regnum));

		addr += FR_OFFS(regnum);
		*(struct ia64_fpreg *)addr = *fpval;

		/*
		 * mark the low partition as being used now
		 *
		 * It is highly unlikely that this bit is not already set, but
		 * let's do it for safety.
		 */
		regs->cr_ipsr |= IA64_PSR_MFL;
	}
}

/*
 * Those 2 inline functions generate the spilled versions of the constant floating point
 * registers which can be used with stfX
 */
static inline void
float_spill_f0 (struct ia64_fpreg *final)
{
	ia64_stf_spill(final, 0);
}

static inline void
float_spill_f1 (struct ia64_fpreg *final)
{
	ia64_stf_spill(final, 1);
}

static void
getfpreg (unsigned long regnum, struct ia64_fpreg *fpval, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long addr;

	/*
	 * From EAS-2.5: FPDisableFault has higher priority than
	 * Unaligned Fault. Thus, when we get here, we know the partition is
	 * enabled.
	 *
	 * When regnum > 31, the register is still live and we need to force a save
	 * to current->thread.fph to get access to it.  See discussion in setfpreg()
	 * for reasons and other ways of doing this.
	 */
	if (regnum >= IA64_FIRST_ROTATING_FR) {
		ia64_flush_fph(current);
		*fpval = current->thread.fph[fph_index(regs, regnum)];
	} else {
		/*
		 * f0 = 0.0, f1= 1.0. Those registers are constant and are thus
		 * not saved, we must generate their spilled form on the fly
		 */
		switch(regnum) {
		case 0:
			float_spill_f0(fpval);
			break;
		case 1:
			float_spill_f1(fpval);
			break;
		default:
			/*
			 * pt_regs or switch_stack ?
			 */
			addr = FR_IN_SW(regnum) ? (unsigned long)sw
						: (unsigned long)regs;

			DPRINT("is_sw=%d tmp_base=%lx offset=0x%x\n",
			       FR_IN_SW(regnum), addr, FR_OFFS(regnum));

			addr  += FR_OFFS(regnum);
			*fpval = *(struct ia64_fpreg *)addr;
		}
	}
}


static void
getreg (unsigned long regnum, unsigned long *val, int *nat, struct pt_regs *regs)
{
	struct switch_stack *sw = (struct switch_stack *) regs - 1;
	unsigned long addr, *unat;

	if (regnum >= IA64_FIRST_STACKED_GR) {
		get_rse_reg(regs, regnum, val, nat);
		return;
	}

	/*
	 * take care of r0 (read-only always evaluate to 0)
	 */
	if (regnum == 0) {
		*val = 0;
		if (nat)
			*nat = 0;
		return;
	}

	/*
	 * Now look at registers in [0-31] range and init correct UNAT
	 */
	if (GR_IN_SW(regnum)) {
		addr = (unsigned long)sw;
		unat = &sw->ar_unat;
	} else {
		addr = (unsigned long)regs;
		unat = &sw->caller_unat;
	}

	DPRINT("addr_base=%lx offset=0x%x\n", addr, GR_OFFS(regnum));

	addr += GR_OFFS(regnum);

	*val = *(unsigned long *)addr;

	/*
	 * do it only when requested
	 */
	if (nat)
		*nat = (*unat >> (addr >> 3 & 0x3f)) & 0x1UL;
}

static void
emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsigned long ifa)
{
	/*
	 * IMPORTANT:
	 * Given the way we handle unaligned speculative loads, we should
	 * not get to this point in the code but we keep this sanity check,
	 * just in case.
	 */
	if (ld.x6_op == 1 || ld.x6_op == 3) {
		printk(KERN_ERR "%s: register update on speculative load, error\n", __func__);
		if (die_if_kernel("unaligned reference on speculative load with register update\n",
				  regs, 30))
			return;
	}


	/*
	 * at this point, we know that the base register to update is valid i.e.,
	 * it's not r0
	 */
	if (type == UPD_IMMEDIATE) {
		unsigned long imm;

		/*
		 * Load +Imm: ldXZ r1=[r3],imm(9)
		 *
		 *
		 * form imm9: [13:19] contain the first 7 bits
		 */
		imm = ld.x << 7 | ld.imm;

		/*
		 * sign extend (1+8bits) if m set
		 */
		if (ld.m) imm |= SIGN_EXT9;
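
		/*
		 * Illustrative note (not from the original source): imm9 is
		 * {m (sign), x, imm7}, so for ldXZ r1=[r3],-16 the encoding is
		 * m=1, x=1, imm7=0x70: imm = (1 << 7) | 0x70 = 0xf0, and OR-ing
		 * in SIGN_EXT9 yields 0xfffffffffffffff0, i.e. -16.
		 */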

		/*
		 * ifa == r3 and we know that the NaT bit on r3 was clear so
		 * we can directly use ifa.
		 */
		ifa += imm;

		setreg(ld.r3, ifa, 0, regs);

		DPRINT("ld.x=%d ld.m=%d imm=%ld r3=0x%lx\n", ld.x, ld.m, imm, ifa);

	} else if (ld.m) {
		unsigned long r2;
		int nat_r2;

		/*
		 * Load +Reg Opcode: ldXZ r1=[r3],r2
		 *
		 * Note: that we update r3 even in the case of ldfX.a
		 * (where the load does not happen)
		 *
		 * The way the load algorithm works, we know that r3 does not
		 * have its NaT bit set (would have gotten NaT consumption
		 * before getting the unaligned fault). So we can use ifa
		 * which equals r3 at this point.
		 *
		 * IMPORTANT:
		 * The above statement holds ONLY because we know that we
		 * never reach this code when trying to do a ldX.s.
		 * If we ever made it here on an ldfX.s, that NaT assumption
		 * on r3 would no longer hold.
		 */
		getreg(ld.imm, &r2, &nat_r2, regs);

		ifa += r2;

		/*
		 * propagate Nat r2 -> r3
		 */
		setreg(ld.r3, ifa, nat_r2, regs);

		DPRINT("imm=%d r2=%ld r3=0x%lx nat_r2=%d\n", ld.imm, r2, ifa, nat_r2);
	}
}


static int
emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
{
	unsigned int len = 1 << ld.x6_sz;
	unsigned long val = 0;

	/*
	 * r0, as target, doesn't need to be checked because Illegal Instruction
	 * faults have higher priority than unaligned faults.
	 *
	 * r0 cannot be found as the base as it would never generate an
	 * unaligned reference.
	 */

	/*
	 * ldX.a we will emulate load and also invalidate the ALAT entry.
	 * See comment below for explanation on how we handle ldX.a
	 */

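	/*
	 * Illustrative note (not from the original source): len here is 1, 2, 4
	 * or 8 bytes (x6_sz is two bits).  A 1-byte access can never be
	 * misaligned, so only 2, 4 and 8 are expected and anything else is
	 * rejected below.
	 */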
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) if (len != 2 && len != 4 && len != 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) DPRINT("unknown size: x6=%d\n", ld.x6_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) /* this assumes little-endian byte-order: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) if (copy_from_user(&val, (void __user *) ifa, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) setreg(ld.r1, val, 0, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) * check for updates on any kind of loads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) if (ld.op == 0x5 || ld.m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) emulate_load_updates(ld.op == 0x5 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) * handling of various loads (based on EAS2.4):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) * ldX.acq (ordered load):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) * - acquire semantics would have been used, so force fence instead.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) * ldX.c.clr (check load and clear):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) * - if we get to this handler, it's because the entry was not in the ALAT.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) * Therefore the operation reverts to a normal load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) * ldX.c.nc (check load no clear):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) * - same as previous one
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) * ldX.c.clr.acq (ordered check load and clear):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * - same as above for c.clr part. The load needs to have acquire semantics. So
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) * we use the fence semantics which is stronger and thus ensures correctness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) * ldX.a (advanced load):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) * - suppose ldX.a r1=[r3]. If we get to the unaligned trap it's because the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) * address doesn't match requested size alignment. This means that we would
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) * possibly need more than one load to get the result.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) *
	 * The load part can be handled just like a normal load; however, the difficult
	 * part is getting the right thing into the ALAT. The critical pieces of information
	 * are the base address of the load and its size. To do that, a ld.a must be executed;
	 * clearly any address can be pushed into the table by using ld1.a r1=[r3]. Now
	 * if we use the same target register, we will be okay for the chk.a instruction.
	 * If we look at the store, basically a stX [r3]=r1 checks the ALAT for any entry
	 * which would overlap within [r3,r3+X] (the size of the load was stored in the
	 * ALAT). If such an entry is found, the entry is invalidated. But this is not good
	 * enough; take the following example:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) * r3=3
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) * ld4.a r1=[r3]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) * Could be emulated by doing:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) * ld1.a r1=[r3],1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) * store to temporary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) * ld1.a r1=[r3],1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) * store & shift to temporary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) * ld1.a r1=[r3],1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) * store & shift to temporary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) * ld1.a r1=[r3]
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * store & shift to temporary;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) * r1=temporary
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) *
	 * So in this case, you would get the right value in r1 but the wrong info in
	 * the ALAT. Notice that you could do it in reverse to finish with address 3
	 * but you would still get the size wrong. To get the size right, one needs to
	 * execute exactly the same kind of load. You could do it from an aligned
	 * temporary location, but then you would get the address wrong.
	 *
	 * So no matter what, it is not possible to emulate an advanced load
	 * correctly. But is that really critical?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) *
	 * We will always convert ld.a into a normal load with the ALAT invalidated. This
	 * enables the compiler to do optimizations where certain code paths after ld.a
	 * are not required to have ld.c/chk.a, e.g., code paths with no intervening stores.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) * If there is a store after the advanced load, one must either do a ld.c.* or
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) * chk.a.* to reuse the value stored in the ALAT. Both can "fail" (meaning no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) * entry found in ALAT), and that's perfectly ok because:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) * - ld.c.*, if the entry is not present a normal load is executed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) * - chk.a.*, if the entry is not present, execution jumps to recovery code
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) * In either case, the load can be potentially retried in another form.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) * ALAT must be invalidated for the register (so that chk.a or ld.c don't pick
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) * up a stale entry later). The register base update MUST also be performed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) * when the load has the .acq completer then
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) * use ordering fence.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) if (ld.x6_op == 0x5 || ld.x6_op == 0xa)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) * invalidate ALAT entry in case of advanced load
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) if (ld.x6_op == 0x2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) invala_gr(ld.r1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) unsigned long r2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) unsigned int len = 1 << ld.x6_sz;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) /*
	 * If we get to this handler, the NaT bits on both r3 and r2 have
	 * already been checked, so we don't need to check them again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * extract the value to be stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) getreg(ld.imm, &r2, NULL, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) * we rely on the macros in unaligned.h for now i.e.,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) * we let the compiler figure out how to read memory gracefully.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) *
	 * We need this switch/case because of the way the inline functions
	 * work. The code is optimized by the compiler and ends up looking
	 * like a single switch/case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) DPRINT("st%d [%lx]=%lx\n", len, ifa, r2);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) if (len != 2 && len != 4 && len != 8) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) DPRINT("unknown size: x6=%d\n", ld.x6_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) /* this assumes little-endian byte-order: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) if (copy_to_user((void __user *) ifa, &r2, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * stX [r3]=r2,imm(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * NOTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) * ld.r3 can never be r0, because r0 would not generate an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) * unaligned access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) if (ld.op == 0x5) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) unsigned long imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) /*
		 * form imm9: bits [12:6] of the instruction contain the first 7 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) imm = ld.x << 7 | ld.r1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /*
		 * sign extend (9-bit immediate) if m (the sign bit) is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) if (ld.m) imm |= SIGN_EXT9;
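		/*
		 * Worked example (illustrative only; assumes SIGN_EXT9 is the
		 * 0xffffffffffffff00 mask defined earlier in this file):
		 *   x=1, r1 field=0x05, m=0  ->  imm = 0x85                (+133)
		 *   x=1, r1 field=0x05, m=1  ->  imm = 0xffffffffffffff85  (-123)
		 */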
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) * ifa == r3 (NaT is necessarily cleared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) ifa += imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) DPRINT("imm=%lx r3=%lx\n", imm, ifa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) setreg(ld.r3, ifa, 0, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * we don't have alat_invalidate_multiple() so we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) * to do the complete flush :-<<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) ia64_invala();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * stX.rel: use fence instead of release
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) if (ld.x6_op == 0xd)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) mb();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) * floating point operations sizes in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) static const unsigned char float_fsz[4]={
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) 10, /* extended precision (e) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) 8, /* integer (8) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) 4, /* single precision (s) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) 8 /* double precision (d) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958)
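/*
 * Rough sketch of how the helpers below work (descriptive comment, not from
 * the original): f6 is used as a scratch register.  The mem2float_* helpers
 * load the value from the aligned kernel buffer with the matching ldfX form
 * (converting memory format to register format) and then stf.spill it,
 * producing the 16-byte spill image that setfpreg() expects.  The
 * float2mem_* helpers do the inverse: ldf.fill the spill image, then store
 * it with the matching stfX form.
 */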
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) mem2float_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) ia64_ldfe(6, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) ia64_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) ia64_stf_spill(final, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) mem2float_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) ia64_ldf8(6, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) ia64_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) ia64_stf_spill(final, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) mem2float_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) ia64_ldfs(6, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) ia64_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) ia64_stf_spill(final, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) mem2float_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) ia64_ldfd(6, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) ia64_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) ia64_stf_spill(final, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) float2mem_extended (struct ia64_fpreg *init, struct ia64_fpreg *final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) ia64_ldf_fill(6, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) ia64_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) ia64_stfe(final, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) float2mem_integer (struct ia64_fpreg *init, struct ia64_fpreg *final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) ia64_ldf_fill(6, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) ia64_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) ia64_stf8(final, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) float2mem_single (struct ia64_fpreg *init, struct ia64_fpreg *final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) ia64_ldf_fill(6, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) ia64_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) ia64_stfs(final, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) static inline void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) ia64_ldf_fill(6, init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) ia64_stop();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) ia64_stfd(final, 6);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) struct ia64_fpreg fpr_init[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) struct ia64_fpreg fpr_final[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) unsigned long len = float_fsz[ld.x6_sz];
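	/*
	 * For illustration: float_fsz[] maps x6_sz to the per-element size,
	 * e.g. ldfps (x6_sz==2) gives len==4 and an 8-byte pair, while ldfpd
	 * and ldfp8 give len==8 and a 16-byte pair.
	 */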
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) * fr0 & fr1 don't need to be checked because Illegal Instruction faults have
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) * higher priority than unaligned faults.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) * r0 cannot be found as the base as it would never generate an unaligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) * reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) * make sure we get clean buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) memset(&fpr_init, 0, sizeof(fpr_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) memset(&fpr_final, 0, sizeof(fpr_final));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * ldfpX.a: we don't try to emulate anything but we must
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * invalidate the ALAT entry and execute updates, if any.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) if (ld.x6_op != 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) * This assumes little-endian byte-order. Note that there is no "ldfpe"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) * instruction:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
		DDUMP("fpr_init =", &fpr_init, 2*len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) * XXX fixme
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) * Could optimize inlines by using ldfpX & 2 spills
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) switch( ld.x6_sz ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) mem2float_extended(&fpr_init[0], &fpr_final[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) mem2float_extended(&fpr_init[1], &fpr_final[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) mem2float_integer(&fpr_init[0], &fpr_final[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) mem2float_integer(&fpr_init[1], &fpr_final[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) mem2float_single(&fpr_init[0], &fpr_final[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) mem2float_single(&fpr_init[1], &fpr_final[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) mem2float_double(&fpr_init[0], &fpr_final[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) mem2float_double(&fpr_init[1], &fpr_final[1]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) DDUMP("fpr_final =", &fpr_final, 2*len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) * XXX fixme
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) * A possible optimization would be to drop fpr_final and directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) * use the storage from the saved context i.e., the actual final
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) * destination (pt_regs, switch_stack or thread structure).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) setfpreg(ld.r1, &fpr_final[0], regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) setfpreg(ld.imm, &fpr_final[1], regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * Check for updates: only immediate updates are available for this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * instruction.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) if (ld.m) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * the immediate is implicit given the ldsz of the operation:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * single: 8 (2x4) and for all others it's 16 (2x8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) ifa += len<<1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) * IMPORTANT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) * the fact that we force the NaT of r3 to zero is ONLY valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) * as long as we don't come here with a ldfpX.s.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * For this reason we keep this sanity check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) if (ld.x6_op == 1 || ld.x6_op == 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) printk(KERN_ERR "%s: register update on speculative load pair, error\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) __func__);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) setreg(ld.r3, ifa, 0, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) * Invalidate ALAT entries, if any, for both registers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) if (ld.x6_op == 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) invala_fr(ld.r1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) invala_fr(ld.imm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) struct ia64_fpreg fpr_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) struct ia64_fpreg fpr_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) unsigned long len = float_fsz[ld.x6_sz];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) * fr0 & fr1 don't need to be checked because Illegal Instruction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) * faults have higher priority than unaligned faults.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) * r0 cannot be found as the base as it would never generate an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) * unaligned reference.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) * make sure we get clean buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) memset(&fpr_init,0, sizeof(fpr_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) memset(&fpr_final,0, sizeof(fpr_final));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) /*
	 * For ldfX.a we don't try to emulate anything, but we must
	 * invalidate the ALAT entry.
	 * See the comments for ldX above for a description of how the various loads are handled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) if (ld.x6_op != 0x2) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) if (copy_from_user(&fpr_init, (void __user *) ifa, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) DDUMP("fpr_init =", &fpr_init, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) * we only do something for x6_op={0,8,9}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) switch( ld.x6_sz ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) mem2float_extended(&fpr_init, &fpr_final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) mem2float_integer(&fpr_init, &fpr_final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) mem2float_single(&fpr_init, &fpr_final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) mem2float_double(&fpr_init, &fpr_final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) DDUMP("fpr_final =", &fpr_final, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) * XXX fixme
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * A possible optimization would be to drop fpr_final and directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * use the storage from the saved context i.e., the actual final
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * destination (pt_regs, switch_stack or thread structure).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) setfpreg(ld.r1, &fpr_final, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) * check for updates on any loads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) if (ld.op == 0x7 || ld.m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) emulate_load_updates(ld.op == 0x7 ? UPD_IMMEDIATE: UPD_REG, ld, regs, ifa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) * invalidate ALAT entry in case of advanced floating point loads
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) if (ld.x6_op == 0x2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) invala_fr(ld.r1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) static int
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) struct ia64_fpreg fpr_init;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) struct ia64_fpreg fpr_final;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) unsigned long len = float_fsz[ld.x6_sz];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) * make sure we get clean buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) memset(&fpr_init,0, sizeof(fpr_init));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) memset(&fpr_final,0, sizeof(fpr_final));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) /*
	 * If we get to this handler, the NaT bits on both r3 and r2 have
	 * already been checked, so we don't need to check them again.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * extract the value to be stored
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) getfpreg(ld.imm, &fpr_init, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /*
	 * During this step, we extract the spilled registers from the saved
	 * context, i.e., we refill. Then we store (no spill) to a temporary
	 * aligned location.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) switch( ld.x6_sz ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) case 0:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231) float2mem_extended(&fpr_init, &fpr_final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) case 1:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) float2mem_integer(&fpr_init, &fpr_final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) case 2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) float2mem_single(&fpr_init, &fpr_final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) case 3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) float2mem_double(&fpr_init, &fpr_final);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) DDUMP("fpr_init =", &fpr_init, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) DDUMP("fpr_final =", &fpr_final, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (copy_to_user((void __user *) ifa, &fpr_final, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) * stfX [r3]=r2,imm(9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) * NOTE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) * ld.r3 can never be r0, because r0 would not generate an
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) * unaligned access.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (ld.op == 0x7) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) unsigned long imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) /*
		 * form imm9: bits [12:6] of the instruction contain the first 7 bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) imm = ld.x << 7 | ld.r1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) /*
		 * sign extend (9-bit immediate) if m (the sign bit) is set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (ld.m)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) imm |= SIGN_EXT9;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) * ifa == r3 (NaT is necessarily cleared)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) ifa += imm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) DPRINT("imm=%lx r3=%lx\n", imm, ifa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) setreg(ld.r3, ifa, 0, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) * we don't have alat_invalidate_multiple() so we need
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) * to do the complete flush :-<<
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) ia64_invala();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) /*
 * Make sure we log the unaligned access, so that the user/sysadmin can notice it and
 * eventually fix the program. However, we don't want to do that for every access, so we
 * rate-limit the messages.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) static DEFINE_RATELIMIT_STATE(logging_rate_limit, 5 * HZ, 5);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) struct ia64_psr *ipsr = ia64_psr(regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) mm_segment_t old_fs = get_fs();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) unsigned long bundle[2];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) unsigned long opcode;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) const struct exception_table_entry *eh = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) union {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) unsigned long l;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) load_store_t insn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) } u;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) int ret = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) if (ia64_psr(regs)->be) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /* we don't support big-endian accesses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) if (die_if_kernel("big-endian unaligned accesses are not supported", regs, 0))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) goto force_sigbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) * Treat kernel accesses for which there is an exception handler entry the same as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) * user-level unaligned accesses. Otherwise, a clever program could trick this
	 * handler into reading arbitrary kernel addresses...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) if (!user_mode(regs))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) eh = search_exception_tables(regs->cr_iip + ia64_psr(regs)->ri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) if (user_mode(regs) || eh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) goto force_sigbus;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) if (!no_unaligned_warning &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) !(current->thread.flags & IA64_THREAD_UAC_NOPRINT) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) __ratelimit(&logging_rate_limit))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) char buf[200]; /* comm[] is at most 16 bytes... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) size_t len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333) len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) "ip=0x%016lx\n\r", current->comm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) task_pid_nr(current),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) ifa, regs->cr_iip + ipsr->ri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) * Don't call tty_write_message() if we're in the kernel; we might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) * be holding locks...
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) if (user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) struct tty_struct *tty = get_current_tty();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) tty_write_message(tty, buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) tty_kref_put(tty);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) buf[len-1] = '\0'; /* drop '\r' */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) /* watch for command names containing %s */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) printk(KERN_WARNING "%s", buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) if (no_unaligned_warning) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) printk_once(KERN_WARNING "%s(%d) encountered an "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) "unaligned exception which required\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) "kernel assistance, which degrades "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) "the performance of the application.\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) "Unaligned exception warnings have "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) "been disabled by the system "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) "administrator\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) "echo 0 > /proc/sys/kernel/ignore-"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) "unaligned-usertrap to re-enable\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) current->comm, task_pid_nr(current));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) if (__ratelimit(&logging_rate_limit)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) printk(KERN_WARNING "kernel unaligned access to 0x%016lx, ip=0x%016lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) ifa, regs->cr_iip + ipsr->ri);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (unaligned_dump_stack)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) set_fs(KERNEL_DS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
	DPRINT("iip=%lx ifa=%lx ipsr=%lx (ri=%d, it=%d)\n",
	       regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) * extract the instruction from the bundle given the slot number
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) */
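	/*
	 * Layout assumed by the extraction below (explanatory sketch): an
	 * IA-64 bundle is 128 bits, a 5-bit template followed by three 41-bit
	 * instruction slots, i.e. slot 0 at bits [45:5], slot 1 at bits
	 * [86:46] and slot 2 at bits [127:87].  Slot 1 straddles the two
	 * 64-bit words, hence the (bundle[0] >> 46) | (bundle[1] << 18) below.
	 */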
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) switch (ipsr->ri) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) case 0: u.l = (bundle[0] >> 5); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) case 1: u.l = (bundle[0] >> 46) | (bundle[1] << 18); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) case 2: u.l = (bundle[1] >> 23); break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) opcode = (u.l >> IA64_OPCODE_SHIFT) & IA64_OPCODE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) DPRINT("opcode=%lx ld.qp=%d ld.r1=%d ld.imm=%d ld.r3=%d ld.x=%d ld.hint=%d "
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) "ld.x6=0x%x ld.m=%d ld.op=%d\n", opcode, u.insn.qp, u.insn.r1, u.insn.imm,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) u.insn.r3, u.insn.x, u.insn.hint, u.insn.x6_sz, u.insn.m, u.insn.op);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) * IMPORTANT:
	 * Notice that the switch statement does NOT cover all possible instructions
	 * that DO generate unaligned references. This is done on purpose because for some
	 * instructions it DOES NOT make sense to try to emulate the access. Sometimes it
	 * is WRONG to try to emulate it. Here is a list of instructions we don't emulate, i.e.,
	 * the program will get a signal and die:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) * load/store:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) * - ldX.spill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404) * - stX.spill
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) * Reason: RNATs are based on addresses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) * - ld16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) * - st16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) * Reason: ld16 and st16 are supposed to occur in a single
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) * memory op
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) * synchronization:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) * - cmpxchg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) * - fetchadd
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) * - xchg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) * Reason: ATOMIC operations cannot be emulated properly using multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) * instructions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) * speculative loads:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) * - ldX.sZ
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) * Reason: side effects, code must be ready to deal with failure so simpler
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) * to let the load fail.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) * ---------------------------------------------------------------------------------
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) * XXX fixme
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) * I would like to get rid of this switch case and do something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) * more elegant.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) switch (opcode) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) case LDS_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) case LDSA_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) if (u.insn.x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) /* oops, really a semaphore op (cmpxchg, etc) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) case LDS_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) case LDSA_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) case LDFS_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) case LDFSA_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) case LDFS_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) /*
		 * The instruction will be retried with deferred exceptions turned on, and
		 * we should get the NaT bit installed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) * IMPORTANT: When PSR_ED is set, the register & immediate update forms
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) * are actually executed even though the operation failed. So we don't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) * need to take care of this.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) DPRINT("forcing PSR_ED\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) regs->cr_ipsr |= IA64_PSR_ED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) case LD_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) case LDA_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) case LDBIAS_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) case LDACQ_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) case LDCCLR_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) case LDCNC_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) case LDCCLRACQ_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (u.insn.x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) /* oops, really a semaphore op (cmpxchg, etc) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) case LD_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) case LDA_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) case LDBIAS_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) case LDACQ_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) case LDCCLR_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) case LDCNC_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) case LDCCLRACQ_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) ret = emulate_load_int(ifa, u.insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) case ST_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) case STREL_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) if (u.insn.x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) /* oops, really a semaphore op (cmpxchg, etc) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) fallthrough;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) case ST_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) case STREL_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) ret = emulate_store_int(ifa, u.insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) case LDF_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) case LDFA_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) case LDFCCLR_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) case LDFCNC_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) if (u.insn.x)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) ret = emulate_load_floatpair(ifa, u.insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) ret = emulate_load_float(ifa, u.insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) case LDF_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) case LDFA_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) case LDFCCLR_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) case LDFCNC_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) ret = emulate_load_float(ifa, u.insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) case STF_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) case STF_IMM_OP:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) ret = emulate_store_float(ifa, u.insn, regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) DPRINT("ret=%d\n", ret);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511) goto failure;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) if (ipsr->ri == 2)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) * given today's architecture this case is not likely to happen because a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) * memory access instruction (M) can never be in the last slot of a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) * bundle. But let's keep it for now.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) regs->cr_iip += 16;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) ipsr->ri = (ipsr->ri + 1) & 0x3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) set_fs(old_fs); /* restore original address limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) failure:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) /* something went wrong... */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (!user_mode(regs)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) if (eh) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) ia64_handle_exception(regs, eh);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) if (die_if_kernel("error during unaligned kernel access\n", regs, ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) /* NOT_REACHED */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) force_sigbus:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) ifa,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) 0, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) }