Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2011 Wind River Systems,
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#include <linux/compiler.h>
#include <linux/elf-randomize.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/export.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>

unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#define COLOUR_ALIGN(addr, pgoff)				\
	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this but sched.h declares the function as
 * extern so making it static here results in an error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

bool __virt_addr_valid(const volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= MAP_BASE))
		return false;

	return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);
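
The listing above appears to be the MIPS mmap helper code (arch/mips/mm/mmap.c in a 5.10 tree), where COLOUR_ALIGN() and the align_mask/align_offset fields steer shared mappings so that a given file offset always lands on the same cache colour. The standalone userspace sketch below reproduces that arithmetic for illustration only; SHM_ALIGN_MASK, PAGE_SHIFT and the sample values are assumptions chosen for the example, not values taken from this kernel, which derives shm_align_mask from the real cache geometry at boot.

/*
 * Minimal sketch of the cache-colour alignment used by COLOUR_ALIGN().
 * Assumed values: 4 KiB pages, a 16 KiB aliasing window.
 */
#include <stdio.h>

#define PAGE_SHIFT     12UL
#define SHM_ALIGN_MASK 0x3fffUL	/* assumed 16 KiB aliasing window */

/* Same arithmetic as the kernel macro: round the hint up to the
 * aliasing boundary, then add the colour implied by the file offset. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return ((addr + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) +
	       ((pgoff << PAGE_SHIFT) & SHM_ALIGN_MASK);
}

int main(void)
{
	unsigned long hint = 0x2000123;	/* arbitrary, unaligned hint */
	unsigned long pgoff = 3;	/* mapping starts 3 pages into the file */
	unsigned long addr = colour_align(hint, pgoff);

	/*
	 * (addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK is 0 here, which
	 * is exactly the condition the MAP_FIXED path above rejects when it
	 * does not hold: virtual address and file offset must share a colour.
	 */
	printf("hint=%#lx pgoff=%lu -> addr=%#lx colour_check=%#lx\n",
	       hint, pgoff, addr,
	       (addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK);
	return 0;
}

With the assumed values this prints addr=0x2007000 and a colour check of 0, i.e. the chosen address minus pgoff << PAGE_SHIFT is a multiple of the aliasing window, so aliases of the same page index into the same cache set.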