// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/sys_arm.c
 *
 * Copyright (C) People who wrote linux/arch/i386/kernel/sys_i386.c
 * Copyright (C) 1995, 1996 Russell King.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/compat.h>
#include <linux/cpufeature.h>
#include <linux/personality.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/system_misc.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>

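/*
 * Perform the cache maintenance over [start, end) in PAGE_SIZE chunks,
 * bailing out early if a fatal signal is pending and calling
 * cond_resched() between chunks so that a large range cannot monopolise
 * the CPU.
 */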
static long
__do_compat_cache_op(unsigned long start, unsigned long end)
{
	long ret;

	do {
		unsigned long chunk = min(PAGE_SIZE, end - start);

		if (fatal_signal_pending(current))
			return 0;

		if (cpus_have_const_cap(ARM64_WORKAROUND_1542419)) {
			/*
			 * The workaround requires an inner-shareable tlbi.
			 * We pick the reserved-ASID to minimise the impact.
			 */
			__tlbi(aside1is, __TLBI_VADDR(0, 0));
			dsb(ish);
		}

		ret = __flush_cache_user_range(start, start + chunk);
		if (ret)
			return ret;

		cond_resched();
		start += chunk;
	} while (start < end);

	return 0;
}

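/*
 * Validate the arguments before doing any work: the range must not be
 * inverted, the (currently unused) flags must be zero and the whole
 * range must be accessible userspace memory.
 */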
static inline long
do_compat_cache_op(unsigned long start, unsigned long end, int flags)
{
	if (end < start || flags)
		return -EINVAL;

	if (!access_ok((const void __user *)start, end - start))
		return -EFAULT;

	return __do_compat_cache_op(start, end);
}
/*
 * Handle all unrecognised system calls.
 */
long compat_arm_syscall(struct pt_regs *regs, int scno)
{
	unsigned long addr;

	switch (scno) {
	/*
	 * Flush a region from virtual address 'r0' to virtual address 'r1'
	 * _exclusive_. There is no alignment requirement on either address;
	 * user space does not need to know the hardware cache layout.
	 *
	 * r2 contains flags. It should ALWAYS be passed as ZERO until it
	 * is defined to be something else. For now we ignore it, but may
	 * the fires of hell burn in your belly if you break this rule. ;)
	 *
	 * (at a later date, we may want to allow this call to not flush
	 * various aspects of the cache. Passing '0' will guarantee that
	 * everything necessary gets flushed to maintain consistency in
	 * the specified region).
	 */
	case __ARM_NR_compat_cacheflush:
		return do_compat_cache_op(regs->regs[0], regs->regs[1], regs->regs[2]);

	case __ARM_NR_compat_set_tls:
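		/*
		 * Record the new TLS value and expose it to AArch32
		 * userspace via TPIDRRO_EL0 (the AArch32 TPIDRURO
		 * register).
		 */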
		current->thread.uw.tp_value = regs->regs[0];

		/*
		 * Protect against register corruption from context switch.
		 * See comment in tls_thread_flush.
		 */
		barrier();
		write_sysreg(regs->regs[0], tpidrro_el0);
		return 0;

	default:
		/*
		 * Calls 0xf0xxx..0xf07ff are defined to return -ENOSYS
		 * if not implemented, rather than raising SIGILL. This
		 * way the calling program can gracefully determine whether
		 * a feature is supported.
		 */
		if (scno < __ARM_NR_COMPAT_END)
			return -ENOSYS;
		break;
	}

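	/*
	 * Report the fault address of the SVC instruction that made the
	 * call: 2 bytes back from the PC in Thumb mode, 4 bytes in A32
	 * mode.
	 */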
	addr = instruction_pointer(regs) - (compat_thumb_mode(regs) ? 2 : 4);

	arm64_notify_die("Oops - bad compat syscall(2)", regs,
			 SIGILL, ILL_ILLTRP, addr, scno);
	return 0;
}