// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 * <benh@kernel.crashing.org>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/setup.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

/* Max supported size for symbol names */
#define MAX_SYMNAME	64

/* The alignment of the vDSO */
#define VDSO_ALIGNMENT	(1 << 16)

static unsigned int vdso32_pages;
static void *vdso32_kbase;
static struct page **vdso32_pagelist;
unsigned long vdso32_sigtramp;
unsigned long vdso32_rt_sigtramp;

#ifdef CONFIG_VDSO32
extern char vdso32_start, vdso32_end;
#endif

#ifdef CONFIG_PPC64
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
unsigned long vdso64_rt_sigtramp;
#endif /* CONFIG_PPC64 */

static int vdso_ready;

/*
 * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
 * Once the early boot kernel code no longer needs to muck around
 * with it, it will become dynamically allocated
 */
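/*
 * The union below pads vdso_data out to exactly one page, so that single
 * page can be handed out as the vDSO "data page" that gets mapped into
 * every process right after the vDSO text pages.
 */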
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;

/* Format of the patch table */
struct vdso_patch_def
{
	unsigned long	ftr_mask, ftr_value;
	const char	*gen_name;
	const char	*fix_name;
};

/* Table of functions to patch based on the CPU type/revision
 *
 * Currently, we only change sync_dicache to do nothing on processors
 * with a coherent icache
 */
static struct vdso_patch_def vdso_patches[] = {
	{
		CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE,
		"__kernel_sync_dicache", "__kernel_sync_dicache_p5"
	},
};
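
/*
 * At boot, vdso_fixup_alt_funcs() walks this table and, for each entry
 * whose ftr_mask/ftr_value matches the current CPU features, redirects
 * the generic symbol to the fixed-up one (a NULL fix_name just hides the
 * generic symbol instead).
 */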

/*
 * Information about each vDSO image, carried around during parsing
 * at boot time.
 */
struct lib32_elfinfo
{
	Elf32_Ehdr	*hdr;		/* ptr to ELF */
	Elf32_Sym	*dynsym;	/* ptr to .dynsym section */
	unsigned long	dynsymsize;	/* size of .dynsym section */
	char		*dynstr;	/* ptr to .dynstr section */
	unsigned long	text;		/* offset of .text section in .so */
};

struct lib64_elfinfo
{
	Elf64_Ehdr	*hdr;
	Elf64_Sym	*dynsym;
	unsigned long	dynsymsize;
	char		*dynstr;
	unsigned long	text;
};


/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct page **vdso_pagelist;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_ready)
		return 0;

#ifdef CONFIG_PPC64
	if (is_32bit_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
		vdso_base = VDSO32_MBASE;
	} else {
		vdso_pagelist = vdso64_pagelist;
		vdso_pages = vdso64_pages;
		/*
		 * On 64bit we don't have a preferred map address. This
		 * allows get_unmapped_area to find an area near other mmaps
		 * and most likely share a SLB entry.
		 */
		vdso_base = 0;
	}
#else
	vdso_pagelist = vdso32_pagelist;
	vdso_pages = vdso32_pages;
	vdso_base = VDSO32_MBASE;
#endif

	current->mm->context.vdso_base = 0;

	/* The vDSO had a problem and was disabled, so just don't "enable"
	 * it for this process
	 */
	if (vdso_pages == 0)
		return 0;
	/* Add a page to the vDSO size for the data page */
	vdso_pages++;

	/*
	 * Pick a base address for the vDSO in process space. We try to put it
	 * at vdso_base which is the "natural" base for it, but we might fail
	 * and end up putting it elsewhere.
	 * Add enough to the size so that the result can be aligned.
	 */
	if (mmap_write_lock_killable(mm))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, vdso_base,
				      (vdso_pages << PAGE_SHIFT) +
				      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
				      0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto fail_mmapsem;
	}

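	/*
	 * Note: get_unmapped_area() above was asked for the mapping size
	 * plus (VDSO_ALIGNMENT - 1) bytes of slack, so rounding vdso_base
	 * up below cannot push the mapping past the end of the area we
	 * were given.
	 */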
	/* Add required alignment. */
	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO (since arch_vma_name fails).
	 */
	current->mm->context.vdso_base = vdso_base;

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process isn't
	 * allowed to write to those pages.
	 * gdb can break that via the ptrace interface and thus trigger COW on
	 * those pages, but it's then your responsibility never to do that on
	 * the "data" page of the vDSO or you'll stop getting kernel updates
	 * and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				     VM_READ|VM_EXEC|
				     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				     vdso_pagelist);
	if (rc) {
		current->mm->context.vdso_base = 0;
		goto fail_mmapsem;
	}

	mmap_write_unlock(mm);
	return 0;

fail_mmapsem:
	mmap_write_unlock(mm);
	return rc;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
		return "[vdso]";
	return NULL;
}


#ifdef CONFIG_VDSO32
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf32_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}
	if (size)
		*size = 0;
	return NULL;
}

static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib,
					const char *symname)
{
	unsigned int i;
	char name[MAX_SYMNAME], *c;

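	/*
	 * Symbol names in the vDSO may carry a version suffix (for example
	 * "name@@LINUX_2.6.15"), so truncate at the first '@' and compare
	 * only the bare name.
	 */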
	for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
		if (lib->dynsym[i].st_name == 0)
			continue;
		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
			MAX_SYMNAME);
		c = strchr(name, '@');
		if (c)
			*c = 0;
		if (strcmp(symname, name) == 0)
			return &lib->dynsym[i];
	}
	return NULL;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function32(struct lib32_elfinfo *lib,
					    const char *symname)
{
	Elf32_Sym *sym = find_symbol32(lib, symname);

	if (sym == NULL) {
		printk(KERN_WARNING "vDSO32: function %s not found !\n",
		       symname);
		return 0;
	}
	return sym->st_value - VDSO32_LBASE;
}

static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
				       struct lib64_elfinfo *v64,
				       const char *orig, const char *fix)
{
	Elf32_Sym *sym32_gen, *sym32_fix;

	sym32_gen = find_symbol32(v32, orig);
	if (sym32_gen == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
		return -1;
	}
	if (fix == NULL) {
		sym32_gen->st_name = 0;
		return 0;
	}
	sym32_fix = find_symbol32(v32, fix);
	if (sym32_fix == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
		return -1;
	}
	sym32_gen->st_value = sym32_fix->st_value;
	sym32_gen->st_size = sym32_fix->st_size;
	sym32_gen->st_info = sym32_fix->st_info;
	sym32_gen->st_other = sym32_fix->st_other;
	sym32_gen->st_shndx = sym32_fix->st_shndx;

	return 0;
}
#else /* !CONFIG_VDSO32 */
static unsigned long __init find_function32(struct lib32_elfinfo *lib,
					    const char *symname)
{
	return 0;
}

static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
				       struct lib64_elfinfo *v64,
				       const char *orig, const char *fix)
{
	return 0;
}
#endif /* CONFIG_VDSO32 */


#ifdef CONFIG_PPC64

static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
				    unsigned long *size)
{
	Elf64_Shdr *sechdrs;
	unsigned int i;
	char *secnames;

	/* Grab section headers and strings so we can tell who is who */
	sechdrs = (void *)ehdr + ehdr->e_shoff;
	secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

	/* Find the section they want */
	for (i = 1; i < ehdr->e_shnum; i++) {
		if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
			if (size)
				*size = sechdrs[i].sh_size;
			return (void *)ehdr + sechdrs[i].sh_offset;
		}
	}
	if (size)
		*size = 0;
	return NULL;
}

static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib,
					const char *symname)
{
	unsigned int i;
	char name[MAX_SYMNAME], *c;

	for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
		if (lib->dynsym[i].st_name == 0)
			continue;
		strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
			MAX_SYMNAME);
		c = strchr(name, '@');
		if (c)
			*c = 0;
		if (strcmp(symname, name) == 0)
			return &lib->dynsym[i];
	}
	return NULL;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function64(struct lib64_elfinfo *lib,
					    const char *symname)
{
	Elf64_Sym *sym = find_symbol64(lib, symname);

	if (sym == NULL) {
		printk(KERN_WARNING "vDSO64: function %s not found !\n",
		       symname);
		return 0;
	}
	return sym->st_value - VDSO64_LBASE;
}

static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32,
				       struct lib64_elfinfo *v64,
				       const char *orig, const char *fix)
{
	Elf64_Sym *sym64_gen, *sym64_fix;

	sym64_gen = find_symbol64(v64, orig);
	if (sym64_gen == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
		return -1;
	}
	if (fix == NULL) {
		sym64_gen->st_name = 0;
		return 0;
	}
	sym64_fix = find_symbol64(v64, fix);
	if (sym64_fix == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
		return -1;
	}
	sym64_gen->st_value = sym64_fix->st_value;
	sym64_gen->st_size = sym64_fix->st_size;
	sym64_gen->st_info = sym64_fix->st_info;
	sym64_gen->st_other = sym64_fix->st_other;
	sym64_gen->st_shndx = sym64_fix->st_shndx;

	return 0;
}

#endif /* CONFIG_PPC64 */


static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
					struct lib64_elfinfo *v64)
{
	void *sect;

	/*
	 * Locate symbol tables & text section
	 */

#ifdef CONFIG_VDSO32
	v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
	v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
	if (v32->dynsym == NULL || v32->dynstr == NULL) {
		printk(KERN_ERR "vDSO32: required symbol section not found\n");
		return -1;
	}
	sect = find_section32(v32->hdr, ".text", NULL);
	if (sect == NULL) {
		printk(KERN_ERR "vDSO32: the .text section was not found\n");
		return -1;
	}
	v32->text = sect - vdso32_kbase;
#endif

#ifdef CONFIG_PPC64
	v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
	v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
	if (v64->dynsym == NULL || v64->dynstr == NULL) {
		printk(KERN_ERR "vDSO64: required symbol section not found\n");
		return -1;
	}
	sect = find_section64(v64->hdr, ".text", NULL);
	if (sect == NULL) {
		printk(KERN_ERR "vDSO64: the .text section was not found\n");
		return -1;
	}
	v64->text = sect - vdso64_kbase;
#endif /* CONFIG_PPC64 */

	return 0;
}

static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
					  struct lib64_elfinfo *v64)
{
	/*
	 * Find signal trampolines
	 */
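	/*
	 * These offsets are later added to the per-process vdso_base by the
	 * signal delivery code so that signal frames return through the
	 * vDSO trampolines, which also lets debuggers and unwinders
	 * recognise signal frames.
	 */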

#ifdef CONFIG_PPC64
	vdso64_rt_sigtramp = find_function64(v64, "__kernel_start_sigtramp_rt64");
#endif
	vdso32_sigtramp = find_function32(v32, "__kernel_sigtramp32");
	vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
}

static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
				      struct lib64_elfinfo *v64)
{
#ifdef CONFIG_VDSO32
	Elf32_Sym *sym32;
#endif
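	/*
	 * The __kernel_datapage_offset word in each image gets patched to
	 * the distance from that word to the vDSO data page.  The data page
	 * is mapped right after the last text page, i.e. at offset
	 * vdso_pages << PAGE_SHIFT from the start of the image, hence the
	 * value stored below.
	 */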
#ifdef CONFIG_PPC64
	Elf64_Sym *sym64;

	sym64 = find_symbol64(v64, "__kernel_datapage_offset");
	if (sym64 == NULL) {
		printk(KERN_ERR "vDSO64: Can't find symbol "
		       "__kernel_datapage_offset !\n");
		return -1;
	}
	*((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
		(vdso64_pages << PAGE_SHIFT) -
		(sym64->st_value - VDSO64_LBASE);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
	sym32 = find_symbol32(v32, "__kernel_datapage_offset");
	if (sym32 == NULL) {
		printk(KERN_ERR "vDSO32: Can't find symbol "
		       "__kernel_datapage_offset !\n");
		return -1;
	}
	*((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
		(vdso32_pages << PAGE_SHIFT) -
		(sym32->st_value - VDSO32_LBASE);
#endif

	return 0;
}


static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
				      struct lib64_elfinfo *v64)
{
	unsigned long size;
	void *start;

#ifdef CONFIG_PPC64
	start = find_section64(v64->hdr, "__ftr_fixup", &size);
	if (start)
		do_feature_fixups(cur_cpu_spec->cpu_features,
				  start, start + size);

	start = find_section64(v64->hdr, "__mmu_ftr_fixup", &size);
	if (start)
		do_feature_fixups(cur_cpu_spec->mmu_features,
				  start, start + size);

	start = find_section64(v64->hdr, "__fw_ftr_fixup", &size);
	if (start)
		do_feature_fixups(powerpc_firmware_features,
				  start, start + size);

	start = find_section64(v64->hdr, "__lwsync_fixup", &size);
	if (start)
		do_lwsync_fixups(cur_cpu_spec->cpu_features,
				 start, start + size);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
	start = find_section32(v32->hdr, "__ftr_fixup", &size);
	if (start)
		do_feature_fixups(cur_cpu_spec->cpu_features,
				  start, start + size);

	start = find_section32(v32->hdr, "__mmu_ftr_fixup", &size);
	if (start)
		do_feature_fixups(cur_cpu_spec->mmu_features,
				  start, start + size);

#ifdef CONFIG_PPC64
	start = find_section32(v32->hdr, "__fw_ftr_fixup", &size);
	if (start)
		do_feature_fixups(powerpc_firmware_features,
				  start, start + size);
#endif /* CONFIG_PPC64 */

	start = find_section32(v32->hdr, "__lwsync_fixup", &size);
	if (start)
		do_lwsync_fixups(cur_cpu_spec->cpu_features,
				 start, start + size);
#endif

	return 0;
}

static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
				       struct lib64_elfinfo *v64)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
		struct vdso_patch_def *patch = &vdso_patches[i];
		int match = (cur_cpu_spec->cpu_features & patch->ftr_mask)
			    == patch->ftr_value;
		if (!match)
			continue;

		DBG("replacing %s with %s...\n", patch->gen_name,
		    patch->fix_name ? patch->fix_name : "NONE");

		/*
		 * Patch the 32 bits and 64 bits symbols. Note that we do not
		 * patch the "." symbol on 64 bits.
		 * It would be easy to do, but doesn't seem to be necessary,
		 * patching the OPD symbol is enough.
		 */
		vdso_do_func_patch32(v32, v64, patch->gen_name,
				     patch->fix_name);
#ifdef CONFIG_PPC64
		vdso_do_func_patch64(v32, v64, patch->gen_name,
				     patch->fix_name);
#endif /* CONFIG_PPC64 */
	}

	return 0;
}


static __init int vdso_setup(void)
{
	struct lib32_elfinfo	v32;
	struct lib64_elfinfo	v64;

	v32.hdr = vdso32_kbase;
#ifdef CONFIG_PPC64
	v64.hdr = vdso64_kbase;
#endif
	if (vdso_do_find_sections(&v32, &v64))
		return -1;

	if (vdso_fixup_datapage(&v32, &v64))
		return -1;

	if (vdso_fixup_features(&v32, &v64))
		return -1;

	if (vdso_fixup_alt_funcs(&v32, &v64))
		return -1;

	vdso_setup_trampolines(&v32, &v64);

	return 0;
}

/*
 * Called from setup_arch to initialize the bitmap of available
 * syscalls in the systemcfg page
 */
static void __init vdso_setup_syscall_map(void)
{
	unsigned int i;
	extern unsigned long *sys_call_table;
#ifdef CONFIG_PPC64
	extern unsigned long *compat_sys_call_table;
#endif
	extern unsigned long sys_ni_syscall;

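	/*
	 * Each implemented syscall sets one bit in the map: syscall i goes
	 * in 32-bit word i >> 5 with bit 0x80000000 >> (i & 0x1f) set
	 * (MSB first), e.g. syscall 3 sets 0x10000000 in word 0.
	 */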
	for (i = 0; i < NR_syscalls; i++) {
#ifdef CONFIG_PPC64
		if (sys_call_table[i] != sys_ni_syscall)
			vdso_data->syscall_map_64[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
		if (IS_ENABLED(CONFIG_COMPAT) &&
		    compat_sys_call_table[i] != sys_ni_syscall)
			vdso_data->syscall_map_32[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
#else /* CONFIG_PPC64 */
		if (sys_call_table[i] != sys_ni_syscall)
			vdso_data->syscall_map_32[i >> 5] |=
				0x80000000UL >> (i & 0x1f);
#endif /* CONFIG_PPC64 */
	}
}

#ifdef CONFIG_PPC64
int vdso_getcpu_init(void)
{
	unsigned long cpu, node, val;

	/*
	 * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node
	 * in the next 16 bits. The VDSO uses this to implement getcpu().
	 */
	cpu = get_cpu();
	WARN_ON_ONCE(cpu > 0xffff);

	node = cpu_to_node(cpu);
	WARN_ON_ONCE(node > 0xffff);

	val = (cpu & 0xffff) | ((node & 0xffff) << 16);
	mtspr(SPRN_SPRG_VDSO_WRITE, val);
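	/*
	 * Keep a copy in the paca so the SPR can be restored if it gets
	 * clobbered, e.g. on return from a guest or a deep idle state.
	 */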
	get_paca()->sprg_vdso = val;

	put_cpu();

	return 0;
}
/* We need to call this before SMP init */
early_initcall(vdso_getcpu_init);
#endif

static int __init vdso_init(void)
{
	int i;

#ifdef CONFIG_PPC64
	/*
	 * Fill up the "systemcfg" stuff for backward compatibility
	 */
	strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
	vdso_data->version.major = SYSTEMCFG_MAJOR;
	vdso_data->version.minor = SYSTEMCFG_MINOR;
	vdso_data->processor = mfspr(SPRN_PVR);
	/*
	 * Fake the old platform number for pSeries and add
	 * in LPAR bit if necessary
	 */
	vdso_data->platform = 0x100;
	if (firmware_has_feature(FW_FEATURE_LPAR))
		vdso_data->platform |= 1;
	vdso_data->physicalMemorySize = memblock_phys_mem_size();
	vdso_data->dcache_size = ppc64_caches.l1d.size;
	vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
	vdso_data->icache_size = ppc64_caches.l1i.size;
	vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
	vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
	vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
	vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
	vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;

	/*
	 * Calculate the size of the 64 bits vDSO
	 */
	vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
	DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages);
#endif /* CONFIG_PPC64 */


#ifdef CONFIG_VDSO32
	vdso32_kbase = &vdso32_start;

	/*
	 * Calculate the size of the 32 bits vDSO
	 */
	vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
	DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages);
#endif

	/*
	 * Set up the syscall map in the vDSO
	 */
	vdso_setup_syscall_map();

	/*
	 * Initialize the vDSO images in memory, that is, do the necessary
	 * fixups of vDSO symbols, locate trampolines, etc...
	 */
	if (vdso_setup()) {
		printk(KERN_ERR "vDSO setup failure, not enabled !\n");
		vdso32_pages = 0;
#ifdef CONFIG_PPC64
		vdso64_pages = 0;
#endif
		return 0;
	}

#ifdef CONFIG_VDSO32
	/* Make sure pages are in the correct state */
	vdso32_pagelist = kcalloc(vdso32_pages + 2, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
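	/*
	 * The list holds the image pages first, then the shared data page,
	 * then a NULL terminator (hence the "+ 2" in the allocation above).
	 */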
	for (i = 0; i < vdso32_pages; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[i++] = virt_to_page(vdso_data);
	vdso32_pagelist[i] = NULL;
#endif

#ifdef CONFIG_PPC64
	vdso64_pagelist = kcalloc(vdso64_pages + 2, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[i++] = virt_to_page(vdso_data);
	vdso64_pagelist[i] = NULL;
#endif /* CONFIG_PPC64 */

	get_page(virt_to_page(vdso_data));

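	/*
	 * Make sure everything set up above is visible before any other CPU
	 * sees vdso_ready != 0 in arch_setup_additional_pages().
	 */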
	smp_wmb();
	vdso_ready = 1;

	return 0;
}
arch_initcall(vdso_init);