^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) // Copyright (C) 2005-2017 Andes Technology Corporation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) #include <linux/sched.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) #include <linux/mman.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #include <linux/shm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7)
/*
 * Round 'addr' up to the next SHMLBA boundary, then add the cache-colour
 * offset implied by 'pgoff' so the mapping gets the same colour as the
 * file offset it maps (required to avoid aliasing in VIPT caches).
 * Each argument is evaluated exactly once.
 */
#define COLOUR_ALIGN(addr,pgoff) \
((((addr)+SHMLBA-1)&~(SHMLBA-1)) + \
(((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * We need to ensure that shared mappings are correctly aligned to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * avoid aliasing issues with VIPT caches. We need to ensure that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * a specific page of an object is always mapped at a multiple of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * SHMLBA bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * We unconditionally provide this function for all cases, however
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) * in the VIVT case, we optimise out the alignment rules.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) arch_get_unmapped_area(struct file *filp, unsigned long addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) unsigned long len, unsigned long pgoff,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) unsigned long flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) struct mm_struct *mm = current->mm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) struct vm_area_struct *vma;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) int do_align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) struct vm_unmapped_area_info info;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) int aliasing = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) if(IS_ENABLED(CONFIG_CPU_CACHE_ALIASING))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) aliasing = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) * We only need to do colour alignment if either the I or D
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) * caches alias.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) if (aliasing)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) do_align = filp || (flags & MAP_SHARED);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42) * We enforce the MAP_FIXED case.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) if (flags & MAP_FIXED) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) if (aliasing && flags & MAP_SHARED &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) if (len > TASK_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) if (addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) if (do_align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) addr = COLOUR_ALIGN(addr, pgoff);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) addr = PAGE_ALIGN(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) vma = find_vma(mm, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) if (TASK_SIZE - len >= addr &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) (!vma || addr + len <= vm_start_gap(vma)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) info.flags = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) info.length = len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) info.low_limit = mm->mmap_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) info.high_limit = TASK_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) info.align_offset = pgoff << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72) return vm_unmapped_area(&info);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) }