Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

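The file below is this tree's copy of arch/arm64/kernel/vdso.c — the arm64 vDSO setup code. Per the blame, every line comes from the single import commit 8f3ce5b39 (kx, 2023-10-28). It validates the kernel-embedded vDSO image at boot, maps the [vvar]/[vdso] pair into each new process, handles time namespaces, and sets up the AArch32 compat vectors page, sigpage, and vDSO.
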
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/time_namespace.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
extern char vdso32_start[], vdso32_end[];

enum vdso_abi {
	VDSO_ABI_AA64,
	VDSO_ABI_AA32,
};

enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};
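
/*
 * Note: the [vvar] mapping is always VVAR_NR_PAGES (two) pages long: page 0
 * is the vDSO data page seen by the task and page 1 is only populated for
 * tasks inside a non-root time namespace (see vvar_fault() below). These
 * offsets must line up with __VVAR_PAGES, which is checked by the
 * BUILD_BUG_ON() in __setup_additional_pages().
 */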

struct vdso_abi_info {
	const char *name;
	const char *vdso_code_start;
	const char *vdso_code_end;
	unsigned long vdso_pages;
	/* Data Mapping */
	struct vm_special_mapping *dm;
	/* Code Mapping */
	struct vm_special_mapping *cm;
};

static struct vdso_abi_info vdso_info[] __ro_after_init = {
	[VDSO_ABI_AA64] = {
		.name = "vdso",
		.vdso_code_start = vdso_start,
		.vdso_code_end = vdso_end,
	},
#ifdef CONFIG_COMPAT_VDSO
	[VDSO_ABI_AA32] = {
		.name = "vdso32",
		.vdso_code_start = vdso32_start,
		.vdso_code_end = vdso32_end,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
	struct vdso_data	data[CS_BASES];
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;
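
/*
 * Note: CS_BASES is 2 (CS_HRES_COARSE and CS_RAW, from vdso/datapage.h), so
 * both vdso_data slots share one page; the union with a PAGE_SIZE byte array
 * pins the storage to exactly one page, which vvar_fault() then maps
 * read-only into every process.
 */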

static int __vdso_remap(enum vdso_abi abi,
			const struct vm_special_mapping *sm,
			struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
	unsigned long vdso_size = vdso_info[abi].vdso_code_end -
				  vdso_info[abi].vdso_code_start;

	if (vdso_size != new_size)
		return -EINVAL;

	current->mm->context.vdso = (void *)new_vma->vm_start;

	return 0;
}
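
/*
 * Note: this is the special-mapping mremap hook. Tools such as CRIU move the
 * vDSO with mremap(); rejecting resizes and updating context.vdso here keeps
 * the address the kernel uses for signal trampolines consistent with where
 * the code actually lives.
 */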

static int __vdso_init(enum vdso_abi abi)
{
	int i;
	struct page **vdso_pagelist;
	unsigned long pfn;

	if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
		pr_err("vDSO is not a valid ELF object!\n");
		return -EINVAL;
	}

	vdso_info[abi].vdso_pages = (
			vdso_info[abi].vdso_code_end -
			vdso_info[abi].vdso_code_start) >>
			PAGE_SHIFT;

	vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages,
				sizeof(struct page *),
				GFP_KERNEL);
	if (vdso_pagelist == NULL)
		return -ENOMEM;

	/* Grab the vDSO code pages. */
	pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);

	for (i = 0; i < vdso_info[abi].vdso_pages; i++)
		vdso_pagelist[i] = pfn_to_page(pfn + i);

	vdso_info[abi].cm->pages = vdso_pagelist;

	return 0;
}
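
/*
 * Note: __vdso_init() runs once at boot, via the arch_initcall()s below. The
 * vDSO image is linked into the kernel between vdso_start and vdso_end, so
 * the page array built here is shared read-only by every process that later
 * maps it in __setup_additional_pages().
 */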

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return (struct vdso_data *)(vvar_page);
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	struct vm_area_struct *vma;

	mmap_read_lock(mm);

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
			zap_page_range(vma, vma->vm_start, size);
#ifdef CONFIG_COMPAT_VDSO
		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
			zap_page_range(vma, vma->vm_start, size);
#endif
	}

	mmap_read_unlock(mm);
	return 0;
}

static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = sym_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace-
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = sym_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}
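
/*
 * Note the page-swap trick above: for a task inside a time namespace, page 0
 * of [vvar] becomes its namespace page (boot-time clock offsets) while the
 * real vdso_data moves to page 1, where the vDSO's timens accessor expects
 * to find it. Tasks in the root namespace fault vdso_data in at page 0 and
 * never populate page 1.
 */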

static int vvar_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *new_vma)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != VVAR_NR_PAGES * PAGE_SIZE)
		return -EINVAL;

	return 0;
}

static int __setup_additional_pages(enum vdso_abi abi,
				    struct mm_struct *mm,
				    struct linux_binprm *bprm,
				    int uses_interp)
{
	unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
	unsigned long gp_flags = 0;
	void *ret;

	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);

	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
	/* Be sure to map the data page */
	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;

	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		ret = ERR_PTR(vdso_base);
		goto up_fail;
	}

	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
				       VM_READ|VM_MAYREAD|VM_PFNMAP,
				       vdso_info[abi].dm);
	if (IS_ERR(ret))
		goto up_fail;

	if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) && system_supports_bti())
		gp_flags = VM_ARM64_BTI;

	vdso_base += VVAR_NR_PAGES * PAGE_SIZE;
	mm->context.vdso = (void *)vdso_base;
	ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
				       VM_READ|VM_EXEC|gp_flags|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_info[abi].cm);
	if (IS_ERR(ret))
		goto up_fail;

	return 0;

up_fail:
	mm->context.vdso = NULL;
	return PTR_ERR(ret);
}
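
/*
 * Resulting layout, at a base picked by get_unmapped_area():
 *
 *	vdso_base - 2 * PAGE_SIZE:  [vvar] data page    (r--, PFN-mapped)
 *	vdso_base - 1 * PAGE_SIZE:  [vvar] timens page  (r--, PFN-mapped)
 *	vdso_base:                  [vdso] text         (r-x, plus BTI if supported)
 *
 * context.vdso records the text base so signal trampolines and the mremap()
 * tracking in __vdso_remap() stay coherent.
 */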

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
}

enum aarch32_map {
	AA32_MAP_VECTORS, /* kuser helpers */
	AA32_MAP_SIGPAGE,
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
};

static struct page *aarch32_vectors_page __ro_after_init;
static struct page *aarch32_sig_page __ro_after_init;

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
		.name	= "[vectors]", /* ABI */
		.pages	= &aarch32_vectors_page,
	},
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
	},
	[AA32_MAP_VVAR] = {
		.name = "[vvar]",
		.fault = vvar_fault,
		.mremap = vvar_mremap,
	},
	[AA32_MAP_VDSO] = {
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
};

static int aarch32_alloc_kuser_vdso_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vdso_page;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	vdso_page = get_zeroed_page(GFP_ATOMIC);
	if (!vdso_page)
		return -ENOMEM;

	memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
	       kuser_sz);
	aarch32_vectors_page = virt_to_page(vdso_page);
	flush_dcache_page(aarch32_vectors_page);
	return 0;
}
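
/*
 * Note: the helpers are copied to the *end* of the page because the AArch32
 * kuser ABI places them at fixed addresses counting back from 0xffff1000
 * (e.g. __kuser_memory_barrier at 0xffff0fa0); the page itself is later
 * mapped at AARCH32_VECTORS_BASE (0xffff0000).
 */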

static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
		return -ENOMEM;

	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);
	return 0;
}

static int __aarch32_alloc_vdso_pages(void)
{
	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
		return 0;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA32);
}

static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
	void *ret;

	if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
		return 0;

	/*
	 * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
	 * not safe to CoW the page containing the CPU exception vectors.
	 */
	ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_VECTORS]);

	return PTR_ERR_OR_ZERO(ret);
}

static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
	void *ret;

	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = ERR_PTR(addr);
		goto out;
	}

	/*
	 * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
	 * set breakpoints.
	 */
	ret = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC,
				       &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
	if (IS_ERR(ret))
		goto out;

	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = aarch32_kuser_helpers_setup(mm);
	if (ret)
		goto out;

	if (IS_ENABLED(CONFIG_COMPAT_VDSO)) {
		ret = __setup_additional_pages(VDSO_ABI_AA32, mm, bprm,
					       uses_interp);
		if (ret)
			goto out;
	}

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;
}
#endif /* CONFIG_COMPAT */
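
/*
 * For a 32-bit task this yields up to four mappings: [vectors] pinned at
 * 0xffff0000 for the kuser helpers, a floating [sigpage] holding the
 * sigreturn trampolines (recorded in context.sigpage), and, when
 * CONFIG_COMPAT_VDSO is enabled, the [vvar]/[vdso] pair laid out exactly as
 * in the 64-bit case.
 */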

static int vdso_mremap(const struct vm_special_mapping *sm,
		struct vm_area_struct *new_vma)
{
	return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
}

enum aarch64_map {
	AA64_MAP_VVAR,
	AA64_MAP_VDSO,
};

static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
	[AA64_MAP_VVAR] = {
		.name	= "[vvar]",
		.fault = vvar_fault,
		.mremap = vvar_mremap,
	},
	[AA64_MAP_VDSO] = {
		.name	= "[vdso]",
		.mremap = vdso_mremap,
	},
};

static int __init vdso_init(void)
{
	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];

	return __vdso_init(VDSO_ABI_AA64);
}
arch_initcall(vdso_init);

int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	ret = __setup_additional_pages(VDSO_ABI_AA64, mm, bprm, uses_interp);
	mmap_write_unlock(mm);

	return ret;
}
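
From userspace all of this is invisible plumbing: the kernel advertises the [vdso] mapping through the AT_SYSINFO_EHDR auxv entry, and libc routes clock_gettime()/gettimeofday() through it so hot time queries avoid the syscall path. Below is a minimal sketch to observe that on a running board; it is a hypothetical demo program, not part of this tree.

/* vdso_demo.c — hypothetical test, not part of this repo.
 * Build: gcc -O2 -o vdso_demo vdso_demo.c
 * On this kernel the clock_gettime() call is serviced by the
 * [vdso]/[vvar] pages mapped by the code above, with no syscall.
 */
#include <stdio.h>
#include <time.h>
#include <elf.h>
#include <sys/auxv.h>

int main(void)
{
	struct timespec ts;
	/* Address the kernel advertised for the vDSO ELF image. */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	printf("AT_SYSINFO_EHDR = %#lx\n", vdso);

	clock_gettime(CLOCK_MONOTONIC, &ts);
	printf("monotonic: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

	/* Compare with the mappings: grep -E 'vdso|vvar' /proc/self/maps */
	return 0;
}

Running it under strace should show no clock_gettime syscall for the timed call, and /proc/self/maps should list the [vvar] pages immediately below the [vdso] text, matching the layout built in __setup_additional_pages().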