^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * vDSO implementation for Hexagon
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Copyright (c) 2011, The Linux Foundation. All rights reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <linux/err.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/binfmts.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) #include <asm/vdso.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) static struct page *vdso_page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) /* Create a vDSO page holding the signal trampoline.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * We want this for a non-executable stack.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) */
static int __init vdso_init(void)
{
	struct hexagon_vdso *vdso;

	/* One physical page backs the vDSO for every process. */
	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

	/*
	 * Temporarily map the page into kernel virtual space so its
	 * contents can be written; the mapping is torn down again once
	 * the trampoline has been installed.
	 */
	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");
	clear_page(vdso);

	/* Install the signal trampoline; currently looks like this:
	 * r6 = #__NR_rt_sigreturn;
	 * trap0(#1);
	 */
	vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
	vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];

	/* Drop the temporary kernel mapping; the page itself stays allocated. */
	vunmap(vdso);

	return 0;
}
arch_initcall(vdso_init);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * Called from binfmt_elf. Create a VMA for the vDSO page.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) */
/* Note: bprm and uses_interp are part of the binfmt_elf hook signature
 * but are not consulted on Hexagon.
 *
 * Returns 0 on success, -EINTR if the mmap lock acquisition was
 * interrupted, or a negative errno from the mapping helpers.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long vdso_base;
	struct mm_struct *mm = current->mm;

	/* Killable variant: lets a fatal signal abort the exec path. */
	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/* Try to get it loaded right near ld.so/glibc. */
	vdso_base = STACK_TOP;

	vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = vdso_base; /* failure: negative errno encoded in the address */
		goto up_fail;
	}

	/* MAYWRITE to allow gdb to COW and set breakpoints. */
	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      &vdso_page);

	if (ret)
		goto up_fail;

	/* Record the vDSO address in the mm context (read back by
	 * arch_vma_name() and the signal-delivery code). */
	mm->context.vdso = (void *)vdso_base;

up_fail:
	/* Single unlock site for both the success and error paths. */
	mmap_write_unlock(mm);
	return ret;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) const char *arch_vma_name(struct vm_area_struct *vma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) return "[vdso]";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) }