/*
 * arch/xtensa/kernel/syscall.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 * Copyright (C) 2000 Silicon Graphics, Inc.
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 *
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier <marc@tensilica.com, marc@alumni.uwaterloo.ca>
 * Chris Zankel <chris@zankel.net>
 * Kevin Chea
 *
 */
#include <linux/uaccess.h>
#include <asm/syscall.h>
#include <asm/unistd.h>
#include <linux/linkage.h>
#include <linux/stringify.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/shm.h>

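/*
 * Every slot defaults to sys_ni_syscall; the generated
 * <asm/syscall_table.h> then overrides the implemented entries through
 * the __SYSCALL() designated initializers below.
 */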
syscall_t sys_call_table[__NR_syscalls] /* FIXME __cacheline_aligned */= {
	[0 ... __NR_syscalls - 1] = (syscall_t)&sys_ni_syscall,

#define __SYSCALL(nr, entry, nargs)	[nr] = (syscall_t)entry,
#include <asm/syscall_table.h>
#undef __SYSCALL
};

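/*
 * Round addr up to an SHMLBA boundary and add the colour offset implied
 * by pgoff, so that shared mappings of the same file offset end up on
 * the same cache colour.
 */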
#define COLOUR_ALIGN(addr, pgoff) \
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) + \
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

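/*
 * shmat() wrapper: do_shmat() hands back the attach address through a
 * pointer, so fold that into the usual "address or negative errno"
 * return convention. SHMLBA keeps attaches cache-colour aligned.
 */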
asmlinkage long xtensa_shmat(int shmid, char __user *shmaddr, int shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	return (long)ret;
}

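/*
 * fadvise64_64() wrapper: the xtensa syscall ABI passes 'advice' before
 * the 64-bit offset/len (presumably so the long long arguments land in
 * aligned register pairs); reorder them for the generic helper.
 */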
asmlinkage long xtensa_fadvise64_64(int fd, int advice,
		unsigned long long offset, unsigned long long len)
{
	return ksys_fadvise64_64(fd, offset, len, advice);
}

#ifdef CONFIG_MMU
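/*
 * Pick an unmapped area for mmap(). Shared mappings are aligned to
 * SHMLBA so they cannot alias in a virtually indexed cache; MAP_FIXED
 * requests that would violate this constraint are rejected.
 */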
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
				((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;
	if (!addr)
		addr = TASK_UNMAPPED_BASE;

	if (flags & MAP_SHARED)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

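	/*
	 * Walk the VMA list linearly from addr until a gap of at least
	 * 'len' bytes is found; vm_start_gap() leaves room for the stack
	 * guard gap. Shared mappings are re-coloured after each advance.
	 */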
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point: (!vmm || addr < vmm->vm_end). */
		if (TASK_SIZE - len < addr)
			return -ENOMEM;
		if (!vmm || addr + len <= vm_start_gap(vmm))
			return addr;
		addr = vmm->vm_end;
		if (flags & MAP_SHARED)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}
#endif