// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
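
/*
 * Worked example (illustrative, assuming SHMLBA == 4 * PAGE_SIZE, i.e.
 * 16 KiB with 4 KiB pages): COLOUR_ALIGN(0x5000, 3) rounds 0x5000 up to
 * the next 16 KiB boundary (0x8000), then adds the colour of the file
 * offset, (3 << 12) & 0x3fff = 0x3000, giving 0xb000.  The result keeps
 * page 3's colour within an SHMLBA window, so every mapping of that
 * file page lands on the same cache colour.
 */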

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
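        /*
         * A file-backed mapping is aligned even when it is private: its
         * pages come from the page cache and may be mapped elsewhere as
         * well, so keeping every mapping of a page on one colour is
         * presumably why filp alone triggers alignment here.
         */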
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /*
         * We enforce the MAP_FIXED case: a shared mapping's address must
         * be colour-congruent with its file offset (addr and
         * pgoff << PAGE_SHIFT equal modulo SHMLBA); any other fixed
         * request is honoured as given.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = mm->mmap_base;
        info.high_limit = TASK_SIZE;
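        /*
         * vm_unmapped_area() returns an address whose bits under
         * align_mask match align_offset, i.e. one congruent to
         * pgoff << PAGE_SHIFT modulo SHMLBA when colouring is needed.
         */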
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();
        struct vm_unmapped_area_info info;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

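        /* MAP_FIXED: only sanity-check colour congruence of shared mappings */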
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = FIRST_USER_ADDRESS;
        info.high_limit = mm->mmap_base;
        info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
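        /*
         * An errno return such as -ENOMEM is never page aligned, so a
         * set offset bit here means vm_unmapped_area() failed.
         */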
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = mm->mmap_base;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
        if (addr < PHYS_OFFSET)
                return 0;
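        /*
         * Compare against the last valid lowmem byte rather than
         * high_memory itself, presumably so that __pa() is only applied
         * to an address inside the lowmem mapping and the bound cannot
         * overflow when lowmem ends at the top of the address space.
         */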
        if (addr + size > __pa(high_memory - 1) + 1)
                return 0;

        return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
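        /*
         * PHYS_MASK covers the widest physical address this
         * configuration can address (e.g. 40 bits with LPAE,
         * 32 bits without).
         */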
        return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid.  The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions.  This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
        if (iomem_is_exclusive(pfn << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}

#endif