/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>

/*
 * Minimum alignment for shared mappings: a single page when the caches
 * are sane (no aliasing); raised at boot on CPUs whose virtually
 * indexed caches can alias.
 */
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
/*
 * To avoid cache aliases, we map shared pages with the same cache
 * colour, i.e. the same D-cache index bits, as their file offset.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	/* Round up to the next colour boundary ... */
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	/* ... then add back the colour bits of the file offset. */
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
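
/*
 * Worked example (hypothetical values): with a 16KiB D-cache way size,
 * shm_align_mask is 0x3fff. For addr = 0x40001000 and pgoff = 3 (4KiB
 * pages), base = 0x40004000 and off = 0x3000, so COLOUR_ALIGN() returns
 * 0x40007000, which is >= addr and shares the colour bits 0x3000 with
 * the file offset.
 */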

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}
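
	/*
	 * Example of the MAP_FIXED check above (hypothetical values): with
	 * shm_align_mask = 0x3fff, a MAP_SHARED request at addr = 0x40001000
	 * with pgoff = 3 is rejected, since 0x40001000 - 0x3000 leaves
	 * colour bits 0x2000; addr = 0x40007000 would be accepted.
	 */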

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

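	/*
	 * vm_unmapped_area() scans [low_limit, high_limit) for a free range
	 * of 'length' bytes and, given a non-zero align_mask, returns an
	 * address whose masked bits match align_offset, i.e. one with the
	 * right cache colour.
	 */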
	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
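
/*
 * Note: which of the two helpers above gets used depends on the mmap
 * layout chosen for the process; the core VM calls
 * arch_get_unmapped_area() for the legacy bottom-up layout and
 * arch_get_unmapped_area_topdown() otherwise.
 */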
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	/* Reject accesses below the physical base of system RAM ... */
	if (addr < __MEMORY_START)
		return 0;
	/* ... and those extending past the end of the direct-mapped RAM. */
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	/* mmap() of any physical range through /dev/mem is permitted. */
	return 1;
}