// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

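/*
 * Roughly: the first attempt stays inside the MODULES_VSIZE window above
 * module_alloc_base, which keeps modules within direct-branch range of
 * each other and of the kernel; when module PLTs are available, a failed
 * first attempt may be retried over a full 2 GiB range instead (see the
 * comment below).
 */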
void *module_alloc(unsigned long size)
{
	u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
	gfp_t gfp_mask = GFP_KERNEL;
	void *p;

	/* Silence the initial allocation */
	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
		gfp_mask |= __GFP_NOWARN;

	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		/* don't exceed the static module region - see below */
		module_alloc_end = MODULES_END;

	p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
				 module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
				 NUMA_NO_NODE, __builtin_return_address(0));

	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
	    (IS_ENABLED(CONFIG_KASAN_VMALLOC) ||
	     (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	      !IS_ENABLED(CONFIG_KASAN_SW_TAGS))))
		/*
		 * KASAN without KASAN_VMALLOC can only deal with module
		 * allocations being served from the reserved module region,
		 * since the remainder of the vmalloc region is already
		 * backed by zero shadow pages, and punching holes into it
		 * is non-trivial. Since the module region is not randomized
		 * when KASAN is enabled without KASAN_VMALLOC, it is even
		 * less likely that the module region gets exhausted, so we
		 * can simply omit this fallback in that case.
		 */
		p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
					 module_alloc_base + SZ_2G, GFP_KERNEL,
					 PAGE_KERNEL, 0, NUMA_NO_NODE,
					 __builtin_return_address(0));

	if (p && (kasan_module_alloc(p, size) < 0)) {
		vfree(p);
		return NULL;
	}

	return p;
}

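/*
 * How the relocated value is derived from the symbol value S + A: used
 * as-is (ABS), relative to the place P being patched (PREL), or as the
 * distance between the 4 KiB pages of value and place (PAGE, for ADRP).
 */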
enum aarch64_reloc_op {
	RELOC_OP_NONE,
	RELOC_OP_ABS,
	RELOC_OP_PREL,
	RELOC_OP_PAGE,
};

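/*
 * Illustrative example (made-up addresses): for RELOC_OP_PAGE with
 * place == 0xffff800008001ff8 and val == 0xffff800008005010, the result
 * is 0x4000 -- the page delta that ADRP encodes, with the low 12 bits of
 * val applied separately by a :lo12: relocation.
 */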
static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
	switch (reloc_op) {
	case RELOC_OP_ABS:
		return val;
	case RELOC_OP_PREL:
		return val - (u64)place;
	case RELOC_OP_PAGE:
		return (val & ~0xfff) - ((u64)place & ~0xfff);
	case RELOC_OP_NONE:
		return 0;
	}

	pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
	return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
	s64 sval = do_reloc(op, place, val);

	/*
	 * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
	 * relative and absolute relocations as having a range of [-2^15, 2^16)
	 * or [-2^31, 2^32), respectively. However, in order to be able to
	 * detect overflows reliably, we have to choose whether we interpret
	 * such quantities as signed or as unsigned, and stick with it.
	 * The way we organize our address space requires a signed
	 * interpretation of 32-bit relative references, so let's use that
	 * for all R_AARCH64_PRELxx relocations. This means our upper
	 * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
	 */
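	/*
	 * Concretely: a 32-bit PREL result of 0x80000000 is rejected below
	 * even though the psABI's unsigned reading of [-2^31, 2^32) would
	 * admit it, because as an s64 it exceeds S32_MAX.
	 */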

	switch (len) {
	case 16:
		*(s16 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U16_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S16_MIN || sval > S16_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 16-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 32:
		*(s32 *)place = sval;
		switch (op) {
		case RELOC_OP_ABS:
			if (sval < 0 || sval > U32_MAX)
				return -ERANGE;
			break;
		case RELOC_OP_PREL:
			if (sval < S32_MIN || sval > S32_MAX)
				return -ERANGE;
			break;
		default:
			pr_err("Invalid 32-bit data relocation (%d)\n", op);
			return 0;
		}
		break;
	case 64:
		*(s64 *)place = sval;
		break;
	default:
		pr_err("Invalid length (%d) for data relocation\n", len);
		return 0;
	}
	return 0;
}

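/*
 * How the 16-bit immediate of a MOVW-class instruction is patched:
 * MOVNZ selects MOVN or MOVZ depending on the sign of the value (for the
 * signed SABS/PREL groups), while MOVKZ leaves the opcode alone and only
 * replaces the immediate field (for MOVK/MOVZ chains).
 */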
enum aarch64_insn_movw_imm_type {
	AARCH64_INSN_IMM_MOVNZ,
	AARCH64_INSN_IMM_MOVKZ,
};

static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field.
			 */
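			/*
			 * Illustrative example with lsb == 0: sval == -0x1235
			 * yields imm == ~(-0x1235) == 0x1234, and MOVN then
			 * reconstructs ~0x1234 == -0x1235 at run time.
			 */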
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value.
	 */
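	/*
	 * At this point sval is 0 (value was non-negative and fit) or -1
	 * (value was negative and fit); adding 1 maps those to 1 and 0, so
	 * the unsigned comparison flags every other bit pattern as overflow.
	 */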
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}

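/*
 * ADRP instructions in the last two instruction slots of a 4 KiB page are
 * subject to Cortex-A53 erratum 843419 (see is_forbidden_offset_for_adrp()).
 * Affected places are patched to a plain ADR when the target is within
 * +/-1 MiB, and otherwise redirected through a veneer that performs the
 * page computation out of harm's way.
 */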
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
			   __le32 *place, u64 val)
{
	u32 insn;

	if (!is_forbidden_offset_for_adrp(place))
		return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
				      AARCH64_INSN_IMM_ADR);

	/* patch ADRP to ADR if it is in range */
	if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
			    AARCH64_INSN_IMM_ADR)) {
		insn = le32_to_cpu(*place);
		insn &= ~BIT(31);
	} else {
		/* out of range for ADR -> emit a veneer */
		val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
		if (!val)
			return -ENOEXEC;
		insn = aarch64_insn_gen_branch_imm((u64)place, val,
						   AARCH64_INSN_BRANCH_NOLINK);
	}

	*place = cpu_to_le32(insn);
	return 0;
}

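/*
 * Resolve the RELA entries of one section against the final load addresses.
 * For example, a bl to a symbol further than +/-128 MiB away first fails
 * the 26-bit encoding with -ERANGE and, with CONFIG_ARM64_MODULE_PLTS, is
 * then re-encoded to target a freshly emitted PLT entry instead.
 */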
int apply_relocate_add(Elf64_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	int ovf;
	bool overflow_check;
	Elf64_Sym *sym;
	void *loc;
	u64 val;
	Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* loc corresponds to P in the AArch64 ELF document. */
		loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* sym is the ELF symbol we're referring to. */
		sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
			+ ELF64_R_SYM(rel[i].r_info);

		/* val corresponds to (S + A) in the AArch64 ELF document. */
		val = sym->st_value + rel[i].r_addend;

		/* Check for overflow by default. */
		overflow_check = true;

		/* Perform the static relocation. */
		switch (ELF64_R_TYPE(rel[i].r_info)) {
		/* Null relocations. */
		case R_ARM_NONE:
		case R_AARCH64_NONE:
			ovf = 0;
			break;

		/* Data relocations. */
		case R_AARCH64_ABS64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
			break;
		case R_AARCH64_ABS32:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
			break;
		case R_AARCH64_ABS16:
			ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
			break;
		case R_AARCH64_PREL64:
			overflow_check = false;
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
			break;
		case R_AARCH64_PREL32:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
			break;
		case R_AARCH64_PREL16:
			ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
			break;

		/* MOVW instruction relocations. */
		case R_AARCH64_MOVW_UABS_G0_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G1_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G2_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_MOVW_UABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_UABS_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_SABS_G0:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G1:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_SABS_G2:
			ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G0_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G0:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G1_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G1:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G2_NC:
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVKZ);
			break;
		case R_AARCH64_MOVW_PREL_G2:
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
					      AARCH64_INSN_IMM_MOVNZ);
			break;
		case R_AARCH64_MOVW_PREL_G3:
			/* We're using the top bits so we can't overflow. */
			overflow_check = false;
			ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
					      AARCH64_INSN_IMM_MOVNZ);
			break;

		/* Immediate instruction relocations. */
		case R_AARCH64_LD_PREL_LO19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_ADR_PREL_LO21:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
					     AARCH64_INSN_IMM_ADR);
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
			overflow_check = false;
			fallthrough;
		case R_AARCH64_ADR_PREL_PG_HI21:
			ovf = reloc_insn_adrp(me, sechdrs, loc, val);
			if (ovf && ovf != -ERANGE)
				return ovf;
			break;
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST16_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST32_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST64_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_LDST128_ABS_LO12_NC:
			overflow_check = false;
			ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
					     AARCH64_INSN_IMM_12);
			break;
		case R_AARCH64_TSTBR14:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
					     AARCH64_INSN_IMM_14);
			break;
		case R_AARCH64_CONDBR19:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
					     AARCH64_INSN_IMM_19);
			break;
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
					     AARCH64_INSN_IMM_26);

			if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
			    ovf == -ERANGE) {
				val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
				if (!val)
					return -ENOEXEC;
				ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
						     26, AARCH64_INSN_IMM_26);
			}
			break;

		default:
			pr_err("module %s: unsupported RELA relocation: %llu\n",
			       me->name, ELF64_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}

		if (overflow_check && ovf == -ERANGE)
			goto overflow;
	}

	return 0;

overflow:
	pr_err("module %s: overflow in relocation type %d val %Lx\n",
	       me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
	return -ENOEXEC;
}

static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
				    const Elf_Shdr *sechdrs,
				    const char *name)
{
	const Elf_Shdr *s, *se;
	const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

	for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
		if (strcmp(name, secstrs + s->sh_name) == 0)
			return s;
	}

	return NULL;
}

static inline void __init_plt(struct plt_entry *plt, unsigned long addr)
{
	*plt = get_plt_entry(addr, plt);
}

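/*
 * Set up this module's ftrace trampoline PLTs. Roughly: a patched-in
 * branch at a traced function may be out of direct-branch range of the
 * ftrace entry code, so each module carries a small trampoline section
 * whose slots are pointed at FTRACE_ADDR (and FTRACE_REGS_ADDR when the
 * with-regs variant is enabled).
 */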
static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
				  const Elf_Shdr *sechdrs,
				  struct module *mod)
{
#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
	const Elf_Shdr *s;
	struct plt_entry *plts;

	s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
	if (!s)
		return -ENOEXEC;

	plts = (void *)s->sh_addr;

	__init_plt(&plts[FTRACE_PLT_IDX], FTRACE_ADDR);

	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
		__init_plt(&plts[FTRACE_REGS_PLT_IDX], FTRACE_REGS_ADDR);

	mod->arch.ftrace_trampolines = plts;
#endif
	return 0;
}

int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	const Elf_Shdr *s;
	s = find_section(hdr, sechdrs, ".altinstructions");
	if (s)
		apply_alternatives_module((void *)s->sh_addr, s->sh_size);

	return module_init_ftrace_plt(hdr, sechdrs, me);
}