// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/random.h>
#include <linux/io.h>

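/*
 * COLOUR_ALIGN() rounds addr up to the next SHMLBA boundary and then
 * adds the colour of the file offset (pgoff in bytes, modulo SHMLBA),
 * yielding an address at or above addr whose offset within an SHMLBA
 * window matches that of the page being mapped.
 */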
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a given page of an object
 * must always be mapped at the same offset modulo SHMLBA, so that
 * every mapping of that page lands on the same cache colour.
 *
 * We provide this function unconditionally for all mappings; colour
 * alignment is only applied where it is needed.
 */
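/*
 * For illustration (the actual SHMLBA value is architecture-specific):
 * if SHMLBA were 4 * PAGE_SIZE, page 1 of a file (pgoff == 1) could
 * only be mapped at addresses that are PAGE_SIZE past a 4-page
 * boundary, so every mapping of that page uses the same cache colour.
 */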
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	struct vm_unmapped_area_info info;

	/*
	 * Colour alignment is needed whenever the mapping can alias in
	 * the VIPT caches, i.e. for any file-backed mapping and for
	 * shared anonymous mappings.
	 */
	do_align = filp || (flags & MAP_SHARED);

	/*
	 * Honour MAP_FIXED, but reject a shared mapping whose fixed
	 * address does not have the same colour as its file offset,
	 * i.e. (addr - (pgoff << PAGE_SHIFT)) is not SHMLBA-aligned.
	 */
	if (flags & MAP_FIXED) {
		if (flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

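	/*
	 * If the caller supplied an address hint, apply the required
	 * alignment and use the hint when the resulting range fits
	 * below TASK_SIZE and does not collide with an existing VMA.
	 */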
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

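	/*
	 * No usable hint: let vm_unmapped_area() search for a large
	 * enough gap between mm->mmap_base and TASK_SIZE.
	 */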
	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
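	/*
	 * When colour alignment is required, constrain the search so
	 * that the returned address agrees with the file offset in the
	 * SHMLBA colour bits above the page offset.
	 */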
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}