Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for Orange Pi 5/5B/5+ boards

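The file below (going by its contents, arch/arm64/kernel/module-plts.c) implements PLT generation for arm64 kernel modules: trampolines to branch targets that fall outside direct-branch range, plus the veneers used by the Cortex-A53 erratum 843419 workaround.
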
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
 */

#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sort.h>

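/*
 * Generate the ADRP/ADD instruction pair that materializes the address
 * 'dst' in register 'reg': ADRP resolves the 4 KiB page of 'dst' relative
 * to 'pc', and ADD fills in the low 12 bits.
 */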
static struct plt_entry __get_adrp_add_pair(u64 dst, u64 pc,
					    enum aarch64_insn_register reg)
{
	u32 adrp, add;

	adrp = aarch64_insn_gen_adr(pc, dst, reg, AARCH64_INSN_ADR_TYPE_ADRP);
	add = aarch64_insn_gen_add_sub_imm(reg, reg, dst % SZ_4K,
					   AARCH64_INSN_VARIANT_64BIT,
					   AARCH64_INSN_ADSB_ADD);

	return (struct plt_entry){ cpu_to_le32(adrp), cpu_to_le32(add) };
}

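/*
 * Build a complete PLT entry for target 'dst': an ADRP/ADD pair that loads
 * the address into x16 (IP0, the AAPCS64 intra-procedure-call scratch
 * register), followed by BR x16. The BR opcode never varies, so it is
 * generated once and cached in a static variable.
 */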
struct plt_entry get_plt_entry(u64 dst, void *pc)
{
	struct plt_entry plt;
	static u32 br;

	if (!br)
		br = aarch64_insn_gen_branch_reg(AARCH64_INSN_REG_16,
						 AARCH64_INSN_BRANCH_NOLINK);

	plt = __get_adrp_add_pair(dst, (u64)pc, AARCH64_INSN_REG_16);
	plt.br = cpu_to_le32(br);

	return plt;
}

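/*
 * Two PLT entries are interchangeable if they branch to the same target;
 * module_emit_plt_entry() relies on this to reuse the slot it emitted last.
 */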
bool plt_entries_equal(const struct plt_entry *a, const struct plt_entry *b)
{
	u64 p, q;

	/*
	 * Check whether both entries refer to the same target:
	 * do the cheapest checks first.
	 * If the 'add' or 'br' opcodes are different, then the target
	 * cannot be the same.
	 */
	if (a->add != b->add || a->br != b->br)
		return false;

	p = ALIGN_DOWN((u64)a, SZ_4K);
	q = ALIGN_DOWN((u64)b, SZ_4K);

	/*
	 * If the 'adrp' opcodes are the same then we just need to check
	 * that they refer to the same 4k region.
	 */
	if (a->adrp == b->adrp && p == q)
		return true;

	return (p + aarch64_insn_adrp_get_offset(le32_to_cpu(a->adrp))) ==
	       (q + aarch64_insn_adrp_get_offset(le32_to_cpu(b->adrp)));
}

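/*
 * Return true if 'loc' falls inside the module's init region. The unsigned
 * subtraction wraps for addresses below init_layout.base, so those fail
 * the size comparison as intended.
 */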
static bool in_init(const struct module *mod, void *loc)
{
	return (u64)loc - (u64)mod->init_layout.base < mod->init_layout.size;
}

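/*
 * Emit a PLT entry for the target described by 'rela' and 'sym', placing
 * it in the core or init PLT section depending on where the relocated
 * instruction at 'loc' lives. If the new entry would duplicate the
 * previously emitted one, reuse that instead. Returns the address of the
 * entry, or 0 if the section is already full.
 */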
u64 module_emit_plt_entry(struct module *mod, Elf64_Shdr *sechdrs,
			  void *loc, const Elf64_Rela *rela,
			  Elf64_Sym *sym)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries;
	int j = i - 1;
	u64 val = sym->st_value + rela->r_addend;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i++;

	plt[i] = get_plt_entry(val, &plt[i]);

	/*
	 * Check if the entry we just created is a duplicate. Given that the
	 * relocations are sorted, this will be the last entry we allocated
	 * (if one exists).
	 */
	if (j >= 0 && plt_entries_equal(plt + i, plt + j))
		return (u64)&plt[j];

	pltsec->plt_num_entries += i - j;
	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	return (u64)&plt[i];
}

#ifdef CONFIG_ARM64_ERRATUM_843419
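/*
 * Workaround for Cortex-A53 erratum 843419: an ADRP in one of the last two
 * instruction slots of a 4 KiB page (offset 0xff8 or 0xffc) can produce an
 * incorrect result. When such an ADRP cannot be rewritten as an ADR, the
 * relocation code (reloc_insn_adrp() in this tree's module.c) points it at
 * a veneer emitted here: an equivalent ADRP/ADD pair at a safe offset that
 * loads the page address into the original destination register (the
 * caller passes 'val' with the low 12 bits cleared, so the ADD adds
 * nothing), followed by a branch back to the instruction after the ADRP.
 */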
u64 module_emit_veneer_for_adrp(struct module *mod, Elf64_Shdr *sechdrs,
				void *loc, u64 val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entry *plt = (struct plt_entry *)sechdrs[pltsec->plt_shndx].sh_addr;
	int i = pltsec->plt_num_entries++;
	u32 br;
	int rd;

	if (WARN_ON(pltsec->plt_num_entries > pltsec->plt_max_entries))
		return 0;

	if (is_forbidden_offset_for_adrp(&plt[i].adrp))
		i = pltsec->plt_num_entries++;

	/* get the destination register of the ADRP instruction */
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD,
					  le32_to_cpup((__le32 *)loc));

	br = aarch64_insn_gen_branch_imm((u64)&plt[i].br, (u64)loc + 4,
					 AARCH64_INSN_BRANCH_NOLINK);

	plt[i] = __get_adrp_add_pair(val, (u64)&plt[i], rd);
	plt[i].br = cpu_to_le32(br);

	return (u64)&plt[i];
}
#endif

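/* three-way comparison suitable for sort(): yields -1, 0 or 1 */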
#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))

static int cmp_rela(const void *a, const void *b)
{
	const Elf64_Rela *x = a, *y = b;
	int i;

	/* sort by type, symbol index and addend */
	i = cmp_3way(ELF64_R_TYPE(x->r_info), ELF64_R_TYPE(y->r_info));
	if (i == 0)
		i = cmp_3way(ELF64_R_SYM(x->r_info), ELF64_R_SYM(y->r_info));
	if (i == 0)
		i = cmp_3way(x->r_addend, y->r_addend);
	return i;
}

static bool duplicate_rel(const Elf64_Rela *rela, int num)
{
	/*
	 * Entries are sorted by type, symbol index and addend. That means
	 * that, if a duplicate entry exists, it must be in the preceding
	 * slot.
	 */
	return num > 0 && cmp_rela(rela + num, rela + num - 1) == 0;
}

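/*
 * Count the PLT entries one RELA section may need: one per out-of-section
 * branch target (deduplicated via the sort above when the addend is zero),
 * one veneer per ADRP that cannot be kept off a vulnerable offset by
 * raising the section alignment, plus slack for PLT slots that must be
 * skipped because they themselves land at a vulnerable offset.
 */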
static unsigned int count_plts(Elf64_Sym *syms, Elf64_Rela *rela, int num,
			       Elf64_Word dstidx, Elf_Shdr *dstsec)
{
	unsigned int ret = 0;
	Elf64_Sym *s;
	int i;

	for (i = 0; i < num; i++) {
		u64 min_align;

		switch (ELF64_R_TYPE(rela[i].r_info)) {
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
				break;

			/*
			 * We only have to consider branch targets that resolve
			 * to symbols that are defined in a different section.
			 * This is not simply a heuristic, it is a fundamental
			 * limitation, since there is no guaranteed way to emit
			 * PLT entries sufficiently close to the branch if the
			 * section size exceeds the range of a branch
			 * instruction. So ignore relocations against defined
			 * symbols if they live in the same section as the
			 * relocation target.
			 */
			s = syms + ELF64_R_SYM(rela[i].r_info);
			if (s->st_shndx == dstidx)
				break;

			/*
			 * Jump relocations with non-zero addends against
			 * undefined symbols are supported by the ELF spec, but
			 * do not occur in practice (e.g., 'jump n bytes past
			 * the entry point of undefined function symbol f').
			 * So we need to support them, but there is no need to
			 * take them into consideration when trying to optimize
			 * this code. So let's only check for duplicates when
			 * the addend is zero: this allows us to record the PLT
			 * entry address in the symbol table itself, rather than
			 * having to search the list for duplicates each time we
			 * emit one.
			 */
			if (rela[i].r_addend != 0 || !duplicate_rel(rela, i))
				ret++;
			break;
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADR_PREL_PG_HI21:
			if (!IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) ||
			    !cpus_have_const_cap(ARM64_WORKAROUND_843419))
				break;

			/*
			 * Determine the minimal safe alignment for this ADRP
			 * instruction: the section alignment at which it is
			 * guaranteed not to appear at a vulnerable offset.
			 *
			 * This comes down to finding the least significant zero
			 * bit in bits [11:3] of the section offset, and
			 * increasing the section's alignment so that the
			 * resulting address of this instruction is guaranteed
			 * to equal the offset in that particular bit (as well
			 * as all less significant bits). This ensures that the
			 * address modulo 4 KB != 0xff8 or 0xffc (which would
			 * have all ones in bits [11:3])
			 */
			min_align = 2ULL << ffz(rela[i].r_offset | 0x7);

			/*
			 * Allocate veneer space for each ADRP that may appear
			 * at a vulnerable offset nonetheless. At relocation
			 * time, some of these will remain unused since some
			 * ADRP instructions can be patched to ADR instructions
			 * instead.
			 */
			if (min_align > SZ_4K)
				ret++;
			else
				dstsec->sh_addralign = max(dstsec->sh_addralign,
							   min_align);
			break;
		}
	}

	if (IS_ENABLED(CONFIG_ARM64_ERRATUM_843419) &&
	    cpus_have_const_cap(ARM64_WORKAROUND_843419))
		/*
		 * Add some slack so we can skip PLT slots that may trigger
		 * the erratum due to the placement of the ADRP instruction.
		 */
		ret += DIV_ROUND_UP(ret, (SZ_4K / sizeof(struct plt_entry)));

	return ret;
}

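/*
 * A JUMP26 or CALL26 relocation needs a PLT entry only if it refers to a
 * symbol defined outside the section being relocated.
 */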
static bool branch_rela_needs_plt(Elf64_Sym *syms, Elf64_Rela *rela,
				  Elf64_Word dstidx)
{
	Elf64_Sym *s = syms + ELF64_R_SYM(rela->r_info);

	if (s->st_shndx == dstidx)
		return false;

	return ELF64_R_TYPE(rela->r_info) == R_AARCH64_JUMP26 ||
	       ELF64_R_TYPE(rela->r_info) == R_AARCH64_CALL26;
}

/* Group branch PLT relas at the front end of the array. */
static int partition_branch_plt_relas(Elf64_Sym *syms, Elf64_Rela *rela,
				      int numrels, Elf64_Word dstidx)
{
	int i = 0, j = numrels - 1;

	if (!IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return 0;

	while (i < j) {
		if (branch_rela_needs_plt(syms, &rela[i], dstidx))
			i++;
		else if (branch_rela_needs_plt(syms, &rela[j], dstidx))
			swap(rela[i], rela[j]);
		else
			j--;
	}

	return i;
}

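/*
 * Arch-specific hook invoked while the module loader lays out the image:
 * scan the ELF headers, size the .plt and .init.plt sections (and the
 * ftrace trampoline section, when present), and mark them SHT_NOBITS with
 * SHF_ALLOC so the loader reserves zero-initialized executable memory for
 * them. The actual entries are emitted later, during relocation.
 */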
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	bool copy_rela_for_fips140 = false;
	unsigned long core_plts = 0;
	unsigned long init_plts = 0;
	Elf64_Sym *syms = NULL;
	Elf_Shdr *pltsec, *tramp = NULL;
	int i;

	/*
	 * Find the empty .plt section so we can expand it to store the PLT
	 * entries. Record the symtab address as well.
	 */
	for (i = 0; i < ehdr->e_shnum; i++) {
		if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
			mod->arch.core.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name, ".init.plt"))
			mod->arch.init.plt_shndx = i;
		else if (!strcmp(secstrings + sechdrs[i].sh_name,
				 ".text.ftrace_trampoline"))
			tramp = sechdrs + i;
		else if (sechdrs[i].sh_type == SHT_SYMTAB)
			syms = (Elf64_Sym *)sechdrs[i].sh_addr;
	}

	if (!mod->arch.core.plt_shndx || !mod->arch.init.plt_shndx) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	if (IS_ENABLED(CONFIG_CRYPTO_FIPS140) &&
	    !strcmp(mod->name, "fips140"))
		copy_rela_for_fips140 = true;

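	/*
	 * Scan each RELA section: stash copies of the .rodata and .text
	 * relocations for the fips140 module, group and sort the branch
	 * relocations that may need a PLT, and tally PLT entries for the
	 * core and init regions separately.
	 */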
	for (i = 0; i < ehdr->e_shnum; i++) {
		Elf64_Rela *rels = (void *)ehdr + sechdrs[i].sh_offset;
		int nents, numrels = sechdrs[i].sh_size / sizeof(Elf64_Rela);
		Elf64_Shdr *dstsec = sechdrs + sechdrs[i].sh_info;

		if (sechdrs[i].sh_type != SHT_RELA)
			continue;

#ifdef CONFIG_CRYPTO_FIPS140
		if (copy_rela_for_fips140 &&
		    !strcmp(secstrings + dstsec->sh_name, ".rodata")) {
			void *p = kmemdup(rels, numrels * sizeof(Elf64_Rela),
					  GFP_KERNEL);
			if (!p) {
				pr_err("fips140: failed to allocate .rodata RELA buffer\n");
				return -ENOMEM;
			}
			mod->arch.rodata_relocations = p;
			mod->arch.num_rodata_relocations = numrels;
		}
#endif

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

#ifdef CONFIG_CRYPTO_FIPS140
		if (copy_rela_for_fips140 &&
		    !strcmp(secstrings + dstsec->sh_name, ".text")) {
			void *p = kmemdup(rels, numrels * sizeof(Elf64_Rela),
					  GFP_KERNEL);
			if (!p) {
				pr_err("fips140: failed to allocate .text RELA buffer\n");
				return -ENOMEM;
			}
			mod->arch.text_relocations = p;
			mod->arch.num_text_relocations = numrels;
		}
#endif

		/*
		 * sort branch relocations requiring a PLT by type, symbol index
		 * and addend
		 */
		nents = partition_branch_plt_relas(syms, rels, numrels,
						   sechdrs[i].sh_info);
		if (nents)
			sort(rels, nents, sizeof(Elf64_Rela), cmp_rela, NULL);

		if (!str_has_prefix(secstrings + dstsec->sh_name, ".init"))
			core_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
		else
			init_plts += count_plts(syms, rels, numrels,
						sechdrs[i].sh_info, dstsec);
	}

	pltsec = sechdrs + mod->arch.core.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (core_plts + 1) * sizeof(struct plt_entry);
	mod->arch.core.plt_num_entries = 0;
	mod->arch.core.plt_max_entries = core_plts;

	pltsec = sechdrs + mod->arch.init.plt_shndx;
	pltsec->sh_type = SHT_NOBITS;
	pltsec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	pltsec->sh_addralign = L1_CACHE_BYTES;
	pltsec->sh_size = (init_plts + 1) * sizeof(struct plt_entry);
	mod->arch.init.plt_num_entries = 0;
	mod->arch.init.plt_max_entries = init_plts;

	if (tramp) {
		tramp->sh_type = SHT_NOBITS;
		tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
		tramp->sh_addralign = __alignof__(struct plt_entry);
		tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
	}

	return 0;
}