Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) #include <linux/elf.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/ftrace.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/kernel.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/sort.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/moduleloader.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <asm/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <asm/opcodes.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
/*
 * The instruction placed in each PLT slot: a PC-relative load into pc of
 * the literal word that lives PLT_ENT_STRIDE bytes further on in the same
 * group.  The -4 (Thumb2) / -8 (ARM) terms fold in the PC read-ahead bias
 * of the respective instruction set.
 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLT_ENT_LDR		__opcode_to_mem_thumb32(0xf8dff000 | \
							(PLT_ENT_STRIDE - 4))
#else
#define PLT_ENT_LDR		__opcode_to_mem_arm(0xe59ff000 | \
						    (PLT_ENT_STRIDE - 8))
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) 
/*
 * Branch targets that get PLT slots reserved up front in every module
 * PLT section.  With dynamic ftrace enabled these are the ftrace entry
 * points, so patched mcount call sites can always reach them via a
 * veneer.  Empty (zero reserved slots) otherwise.
 */
static const u32 fixed_plts[] = {
#ifdef CONFIG_DYNAMIC_FTRACE
	FTRACE_ADDR,
	MCOUNT_ADDR,
#endif
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) static bool in_init(const struct module *mod, unsigned long loc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33) 	return loc - (u32)mod->init_layout.base < mod->init_layout.size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36) static void prealloc_fixed(struct mod_plt_sec *pltsec, struct plt_entries *plt)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 	if (!ARRAY_SIZE(fixed_plts) || pltsec->plt_count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) 	pltsec->plt_count = ARRAY_SIZE(fixed_plts);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) 	for (i = 0; i < ARRAY_SIZE(plt->ldr); ++i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) 		plt->ldr[i] = PLT_ENT_LDR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  47) 	BUILD_BUG_ON(sizeof(fixed_plts) > sizeof(plt->lit));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  48) 	memcpy(plt->lit, fixed_plts, sizeof(fixed_plts));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  50) 
/*
 * Allocate (or reuse) a PLT veneer for branch target 'val' and return the
 * address of its ldr slot, which the relocated branch instruction will be
 * pointed at.  'loc' is the address of the branch itself and selects the
 * init or core PLT section, so init code never branches through a veneer
 * that outlives the .init region.
 */
u32 get_module_plt(struct module *mod, unsigned long loc, Elf32_Addr val)
{
	struct mod_plt_sec *pltsec = !in_init(mod, loc) ? &mod->arch.core :
							  &mod->arch.init;
	struct plt_entries *plt;
	int idx;

	/* cache the address, ELF header is available only during module load */
	if (!pltsec->plt_ent)
		pltsec->plt_ent = (struct plt_entries *)pltsec->plt->sh_addr;
	plt = pltsec->plt_ent;

	/* make sure the fixed (ftrace) targets occupy the first slots */
	prealloc_fixed(pltsec, plt);

	/* reuse a fixed slot if one already targets 'val' */
	for (idx = 0; idx < ARRAY_SIZE(fixed_plts); ++idx)
		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

	idx = 0;
	/*
	 * Look for an existing entry pointing to 'val'. Given that the
	 * relocations are sorted, this will be the last entry we allocated.
	 * (if one exists).
	 */
	if (pltsec->plt_count > 0) {
		/* locate the group and slot of the most recent entry */
		plt += (pltsec->plt_count - 1) / PLT_ENT_COUNT;
		idx = (pltsec->plt_count - 1) % PLT_ENT_COUNT;

		if (plt->lit[idx] == val)
			return (u32)&plt->ldr[idx];

		/* advance to the next free slot, possibly in the next group */
		idx = (idx + 1) % PLT_ENT_COUNT;
		if (!idx)
			plt++;
	}

	pltsec->plt_count++;
	/* must never exceed the size computed by module_frob_arch_sections() */
	BUG_ON(pltsec->plt_count * PLT_ENT_SIZE > pltsec->plt->sh_size);

	if (!idx)
		/* Populate a new set of entries */
		*plt = (struct plt_entries){
			{ [0 ... PLT_ENT_COUNT - 1] = PLT_ENT_LDR, },
			{ val, }
		};
	else
		plt->lit[idx] = val;

	return (u32)&plt->ldr[idx];
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
/* Three-way compare: -1, 0, or 1 as (a) is below, equal to, or above (b). */
#define cmp_3way(a,b)	((a) < (b) ? -1 : (a) > (b))

/*
 * sort() comparator: order relocations by type first, then by symbol
 * index, so relocations resolving to the same target become adjacent.
 */
static int cmp_rel(const void *a, const void *b)
{
	const Elf32_Rel *x = a, *y = b;
	int res;

	res = cmp_3way(ELF32_R_TYPE(x->r_info), ELF32_R_TYPE(y->r_info));
	if (res != 0)
		return res;

	return cmp_3way(ELF32_R_SYM(x->r_info), ELF32_R_SYM(y->r_info));
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
/*
 * Return true if the branch/call instruction at base + rel->r_offset
 * carries a zero effective addend in its immediate field.
 */
static bool is_zero_addend_relocation(Elf32_Addr base, const Elf32_Rel *rel)
{
	u32 *tval = (u32 *)(base + rel->r_offset);

	/*
	 * Do a bitwise compare on the raw addend rather than fully decoding
	 * the offset and doing an arithmetic comparison.
	 * Note that a zero-addend jump/call relocation is encoded taking the
	 * PC bias into account, i.e., -8 for ARM and -4 for Thumb2.
	 */
	switch (ELF32_R_TYPE(rel->r_info)) {
		u16 upper, lower;

	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
		/* Thumb2 BL/B.W are two halfwords; read each in CPU order */
		upper = __mem_to_opcode_thumb16(((u16 *)tval)[0]);
		lower = __mem_to_opcode_thumb16(((u16 *)tval)[1]);

		/* raw immediate bits must encode -4, the Thumb2 PC bias */
		return (upper & 0x7ff) == 0x7ff && (lower & 0x2fff) == 0x2ffe;

	case R_ARM_CALL:
	case R_ARM_PC24:
	case R_ARM_JUMP24:
		/* 24-bit signed word offset must encode -8, the ARM PC bias */
		return (__mem_to_opcode_arm(*tval) & 0xffffff) == 0xfffffe;
	}
	/* callers only pass the branch relocation types handled above */
	BUG();
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) static bool duplicate_rel(Elf32_Addr base, const Elf32_Rel *rel, int num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) 	const Elf32_Rel *prev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) 	 * Entries are sorted by type and symbol index. That means that,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) 	 * if a duplicate entry exists, it must be in the preceding
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) 	 * slot.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 	if (!num)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) 	prev = rel + num - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) 	return cmp_rel(rel + num, prev) == 0 &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 	       is_zero_addend_relocation(base, prev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) /* Count how many PLT entries we may need */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) static unsigned int count_plts(const Elf32_Sym *syms, Elf32_Addr base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) 			       const Elf32_Rel *rel, int num, Elf32_Word dstidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) 	unsigned int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	const Elf32_Sym *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 	for (i = 0; i < num; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) 		switch (ELF32_R_TYPE(rel[i].r_info)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) 		case R_ARM_CALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) 		case R_ARM_PC24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) 		case R_ARM_JUMP24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) 		case R_ARM_THM_CALL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) 		case R_ARM_THM_JUMP24:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) 			 * We only have to consider branch targets that resolve
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 			 * to symbols that are defined in a different section.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) 			 * This is not simply a heuristic, it is a fundamental
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) 			 * limitation, since there is no guaranteed way to emit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) 			 * PLT entries sufficiently close to the branch if the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) 			 * section size exceeds the range of a branch
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) 			 * instruction. So ignore relocations against defined
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) 			 * symbols if they live in the same section as the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) 			 * relocation target.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) 			s = syms + ELF32_R_SYM(rel[i].r_info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) 			if (s->st_shndx == dstidx)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) 			 * Jump relocations with non-zero addends against
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) 			 * undefined symbols are supported by the ELF spec, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) 			 * do not occur in practice (e.g., 'jump n bytes past
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) 			 * the entry point of undefined function symbol f').
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) 			 * So we need to support them, but there is no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) 			 * take them into consideration when trying to optimize
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) 			 * this code. So let's only check for duplicates when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) 			 * the addend is zero. (Note that calls into the core
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) 			 * module via init PLT entries could involve section
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) 			 * relative symbol references with non-zero addends, for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) 			 * which we may end up emitting duplicates, but the init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) 			 * PLT is released along with the rest of the .init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) 			 * region as soon as module loading completes.)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) 			if (!is_zero_addend_relocation(base, rel + i) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) 			    !duplicate_rel(base, rel, i))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 				ret++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 	return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
/*
 * Called by the generic module loader while the full ELF image is still
 * mapped: scan every executable relocation section, compute an upper
 * bound on the number of PLT veneers needed, and size the .plt and
 * .init.plt sections (as SHT_NOBITS) accordingly.
 * Returns 0 on success or -ENOEXEC if expected sections are missing.
 */
int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
			      char *secstrings, struct module *mod)
{
	/* start with room for the fixed (ftrace) entries in both sections */
	unsigned long core_plts = ARRAY_SIZE(fixed_plts);
	unsigned long init_plts = ARRAY_SIZE(fixed_plts);
	Elf32_Shdr *s, *sechdrs_end = sechdrs + ehdr->e_shnum;
	Elf32_Sym *syms = NULL;

	/*
	 * To store the PLTs, we expand the .text section for core module code
	 * and for initialization code.
	 */
	for (s = sechdrs; s < sechdrs_end; ++s) {
		if (strcmp(".plt", secstrings + s->sh_name) == 0)
			mod->arch.core.plt = s;
		else if (strcmp(".init.plt", secstrings + s->sh_name) == 0)
			mod->arch.init.plt = s;
		else if (s->sh_type == SHT_SYMTAB)
			syms = (Elf32_Sym *)s->sh_addr;
	}

	/* NOTE: .plt/.init.plt are presumably emitted by the module linker
	 * script (not visible from this file) — their absence is fatal */
	if (!mod->arch.core.plt || !mod->arch.init.plt) {
		pr_err("%s: module PLT section(s) missing\n", mod->name);
		return -ENOEXEC;
	}
	if (!syms) {
		pr_err("%s: module symtab section missing\n", mod->name);
		return -ENOEXEC;
	}

	for (s = sechdrs + 1; s < sechdrs_end; ++s) {
		/* relocations are read from the file image via the header */
		Elf32_Rel *rels = (void *)ehdr + s->sh_offset;
		int numrels = s->sh_size / sizeof(Elf32_Rel);
		Elf32_Shdr *dstsec = sechdrs + s->sh_info;

		if (s->sh_type != SHT_REL)
			continue;

		/* ignore relocations that operate on non-exec sections */
		if (!(dstsec->sh_flags & SHF_EXECINSTR))
			continue;

		/* sort by type and symbol index so count_plts() can detect
		 * duplicates in adjacent entries */
		sort(rels, numrels, sizeof(Elf32_Rel), cmp_rel, NULL);

		/* account .init* destinations to the init PLT, all else core */
		if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0)
			core_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
		else
			init_plts += count_plts(syms, dstsec->sh_addr, rels,
						numrels, s->sh_info);
	}

	/* SHT_NOBITS: the loader allocates zeroed memory, no file backing */
	mod->arch.core.plt->sh_type = SHT_NOBITS;
	mod->arch.core.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.core.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.core.plt->sh_size = round_up(core_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.core.plt_count = 0;
	mod->arch.core.plt_ent = NULL;

	mod->arch.init.plt->sh_type = SHT_NOBITS;
	mod->arch.init.plt->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
	mod->arch.init.plt->sh_addralign = L1_CACHE_BYTES;
	mod->arch.init.plt->sh_size = round_up(init_plts * PLT_ENT_SIZE,
					       sizeof(struct plt_entries));
	mod->arch.init.plt_count = 0;
	mod->arch.init.plt_ent = NULL;

	pr_debug("%s: plt=%x, init.plt=%x\n", __func__,
		 mod->arch.core.plt->sh_size, mod->arch.init.plt->sh_size);
	return 0;
}