/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/uaccess.h
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_UACCESS_H
#define __ASM_UACCESS_H

#include <asm/alternative.h>
#include <asm/kernel-pgtable.h>
#include <asm/sysreg.h>

/*
 * User space memory access functions
 */
#include <linux/bitops.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>

#include <asm/cpufeature.h>
#include <asm/mmu.h>
#include <asm/mte.h>
#include <asm/ptrace.h>
#include <asm/memory.h>
#include <asm/extable.h>

#define HAVE_GET_KERNEL_NOFAULT

#define get_fs()	(current_thread_info()->addr_limit)

static inline void set_fs(mm_segment_t fs)
{
	current_thread_info()->addr_limit = fs;

	/*
	 * Prevent a mispredicted conditional call to set_fs from forwarding
	 * the wrong address limit to access_ok under speculation.
	 */
	spec_bar();

	/* On user-mode return, check fs is correct */
	set_thread_flag(TIF_FSCHECK);

	/*
	 * Enable/disable UAO so that copy_to_user() etc can access
	 * kernel memory with the unprivileged instructions.
	 */
	if (IS_ENABLED(CONFIG_ARM64_UAO) && fs == KERNEL_DS)
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
	else
		asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO,
				CONFIG_ARM64_UAO));
}

#define uaccess_kernel()	(get_fs() == KERNEL_DS)
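
/*
 * Illustrative sketch (not part of this header): with the addr_limit scheme
 * above, code that wanted the uaccess helpers to accept kernel addresses
 * used the classic get_fs()/set_fs() dance. The helper and buffer names
 * below are hypothetical.
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);		// widen addr_limit to kernel space
 *	ret = some_uaccess_helper((void __user *)kbuf, len);
 *	set_fs(old_fs);			// always restore the previous limit
 */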

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 1 if the range is valid, 0 otherwise.
 *
 * This is equivalent to the following test:
 * (u65)addr + (u65)size <= (u65)current->addr_limit + 1
 */
static inline unsigned long __range_ok(const void __user *addr, unsigned long size)
{
	unsigned long ret, limit = current_thread_info()->addr_limit;

	/*
	 * Asynchronous I/O running in a kernel thread does not have the
	 * TIF_TAGGED_ADDR flag of the process owning the mm, so always untag
	 * the user address before checking.
	 */
	if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) &&
	    (current->flags & PF_KTHREAD || test_thread_flag(TIF_TAGGED_ADDR)))
		addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// 1: X = A + B; X' = X % 2^64
	"	adds	%0, %3, %2\n"
	// 2: Set C = 0 if X > 2^64, to guarantee X' > C in step 4
	"	csel	%1, xzr, %1, hi\n"
	// 3: Set X' = ~0 if X >= 2^64. For X == 2^64, this decrements X'
	//    to compensate for the carry flag being set in step 4. For
	//    X > 2^64, X' merely has to remain nonzero, which it does.
	"	csinv	%0, %0, xzr, cc\n"
	// 4: For X < 2^64, this gives us X' - C - 1 <= 0, where the -1
	//    comes from the carry in being clear. Otherwise, we are
	//    testing X' - C == 0, subject to the previous adjustments.
	"	sbcs	xzr, %0, %1\n"
	"	cset	%0, ls\n"
	: "=&r" (ret), "+r" (limit) : "Ir" (size), "0" (addr) : "cc");

	return ret;
}

#define access_ok(addr, size)	__range_ok(addr, size)
#define user_addr_max		get_fs
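
/*
 * Illustrative sketch (not part of this header): access_ok() is the check
 * callers are expected to perform before using the unchecked __raw_*
 * accessors below. The pointer name is hypothetical.
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	// At check time, (u65)uptr + (u65)size <= (u65)addr_limit + 1 held,
 *	// i.e. the whole range lies below the current addr_limit.
 */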

#define _ASM_EXTABLE(from, to)						\
	"	.pushsection	__ex_table, \"a\"\n"			\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

/*
 * User access enabling/disabling.
 */
#ifdef CONFIG_ARM64_SW_TTBR0_PAN
static inline void __uaccess_ttbr0_disable(void)
{
	unsigned long flags, ttbr;

	local_irq_save(flags);
	ttbr = read_sysreg(ttbr1_el1);
	ttbr &= ~TTBR_ASID_MASK;
	/* reserved_pg_dir placed before swapper_pg_dir */
	write_sysreg(ttbr - PAGE_SIZE, ttbr0_el1);
	isb();
	/* Set reserved ASID */
	write_sysreg(ttbr, ttbr1_el1);
	isb();
	local_irq_restore(flags);
}

static inline void __uaccess_ttbr0_enable(void)
{
	unsigned long flags, ttbr0, ttbr1;

	/*
	 * Disable interrupts to avoid preemption between reading the 'ttbr0'
	 * variable and the MSR. A context switch could trigger an ASID
	 * roll-over and an update of 'ttbr0'.
	 */
	local_irq_save(flags);
	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);

	/* Restore active ASID */
	ttbr1 = read_sysreg(ttbr1_el1);
	ttbr1 &= ~TTBR_ASID_MASK;	/* safety measure */
	ttbr1 |= ttbr0 & TTBR_ASID_MASK;
	write_sysreg(ttbr1, ttbr1_el1);
	isb();

	/* Restore user page table */
	write_sysreg(ttbr0, ttbr0_el1);
	isb();
	local_irq_restore(flags);
}

static inline bool uaccess_ttbr0_disable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_disable();
	return true;
}

static inline bool uaccess_ttbr0_enable(void)
{
	if (!system_uses_ttbr0_pan())
		return false;
	__uaccess_ttbr0_enable();
	return true;
}
#else
static inline bool uaccess_ttbr0_disable(void)
{
	return false;
}

static inline bool uaccess_ttbr0_enable(void)
{
	return false;
}
#endif

static inline void __uaccess_disable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

static inline void __uaccess_enable_hw_pan(void)
{
	asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), ARM64_HAS_PAN,
			CONFIG_ARM64_PAN));
}

#define __uaccess_disable(alt)						\
do {									\
	if (!uaccess_ttbr0_disable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(1), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

#define __uaccess_enable(alt)						\
do {									\
	if (!uaccess_ttbr0_enable())					\
		asm(ALTERNATIVE("nop", SET_PSTATE_PAN(0), alt,		\
				CONFIG_ARM64_PAN));			\
} while (0)

/*
 * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
 * affects EL0 and TCF affects EL1 irrespective of which TTBR is
 * used.
 * The kernel usually accesses TTBR0 with LDTR/STTR instructions
 * when UAO is available, so these act as EL0 accesses using
 * TCF0.
 * However, the futex.h code uses exclusives, which are executed
 * as EL1 accesses; these can cause a tag check fault even if the
 * user disables TCF0.
 *
 * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
 * and reset it in uaccess_disable().
 *
 * The Tag Check Override (TCO) bit temporarily disables tag checking,
 * preventing the issue.
 */
static inline void __uaccess_disable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

static inline void __uaccess_enable_tco(void)
{
	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
}

/*
 * These functions disable tag checking only if in MTE async mode
 * since the sync mode generates exceptions synchronously and the
 * nofault accessors or load_unaligned_zeropad() can handle them.
 */
static inline void __uaccess_disable_tco_async(void)
{
	if (system_uses_mte_async_mode())
		__uaccess_disable_tco();
}

static inline void __uaccess_enable_tco_async(void)
{
	if (system_uses_mte_async_mode())
		__uaccess_enable_tco();
}

static inline void uaccess_disable_privileged(void)
{
	__uaccess_disable_tco();

	__uaccess_disable(ARM64_HAS_PAN);
}

static inline void uaccess_enable_privileged(void)
{
	__uaccess_enable_tco();

	__uaccess_enable(ARM64_HAS_PAN);
}
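
/*
 * Illustrative sketch (not part of this header): uaccess_enable_privileged()
 * and uaccess_disable_privileged() bracket code that must touch user memory
 * with ordinary (privileged) instructions, such as the exclusive-access
 * sequences in futex.h. The helper below is hypothetical.
 *
 *	uaccess_enable_privileged();
 *	ret = do_privileged_user_access(uaddr);	// e.g. an LDXR/STLXR loop
 *	uaccess_disable_privileged();
 */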

/*
 * These functions are no-ops when UAO is present.
 */
static inline void uaccess_disable_not_uao(void)
{
	__uaccess_disable(ARM64_ALT_PAN_NOT_UAO);
}

static inline void uaccess_enable_not_uao(void)
{
	__uaccess_enable(ARM64_ALT_PAN_NOT_UAO);
}

/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit. In case the pointer is tagged (has the top byte set),
 * untag the pointer before checking.
 */
#define uaccess_mask_ptr(ptr)	(__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
{
	void __user *safe_ptr;

	asm volatile(
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit),
	  "r" (untagged_addr(ptr))
	: "cc");

	csdb();
	return safe_ptr;
}
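
/*
 * Illustrative sketch (not part of this header): the masking above is a
 * Spectre-v1 style mitigation, so the usual pattern is check first, then
 * mask, then access only through the masked pointer. Names are hypothetical.
 *
 *	if (!access_ok(uptr, size))
 *		return -EFAULT;
 *	uptr = uaccess_mask_ptr(uptr);	// becomes NULL if beyond addr_limit
 *	// ... perform the user access via uptr ...
 */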

/*
 * The "__xxx" versions of the user access functions do not verify the address
 * space - it must have been done previously with a separate "access_ok()"
 * call.
 *
 * The "__xxx_error" versions set the third argument to -EFAULT if an error
 * occurs, and leave it unchanged on success.
 */
#define __get_mem_asm(load, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" load "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup, \"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	mov	%1, #0\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err), "=&r" (x)						\
	: "r" (addr), "i" (-EFAULT))

#define __raw_get_mem(ldr, x, ptr, err)					\
do {									\
	unsigned long __gu_val;						\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__get_mem_asm(ldr "b", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__get_mem_asm(ldr "h", "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__get_mem_asm(ldr, "%w", __gu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__get_mem_asm(ldr, "%x", __gu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)

/*
 * We must not call into the scheduler between uaccess_enable_not_uao() and
 * uaccess_disable_not_uao(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_get_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__rgu_ptr = (ptr);			\
	__typeof__(x) __rgu_val;					\
	__chk_user_ptr(ptr);						\
									\
	uaccess_enable_not_uao();					\
	__raw_get_mem("ldtr", __rgu_val, __rgu_ptr, err);		\
	uaccess_disable_not_uao();					\
									\
	(x) = __rgu_val;						\
} while (0)

#define __get_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_get_user((x), __p, (err));			\
	} else {							\
		(x) = (__force __typeof__(x))0; (err) = -EFAULT;	\
	}								\
} while (0)

#define __get_user(x, ptr)						\
({									\
	int __gu_err = 0;						\
	__get_user_error((x), (ptr), __gu_err);				\
	__gu_err;							\
})

#define get_user	__get_user
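
/*
 * Illustrative sketch (not part of this header): note that in this tree
 * __get_user() already performs the access_ok() check, so get_user() is a
 * straight alias for it. Variable names below are hypothetical.
 *
 *	u32 val;
 *
 *	if (get_user(val, uptr))	// uptr is a u32 __user *
 *		return -EFAULT;		// 0 on success, -EFAULT on fault
 */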

/*
 * We must not call into the scheduler between __uaccess_enable_tco_async() and
 * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	__typeof__(dst) __gkn_dst = (dst);				\
	__typeof__(src) __gkn_src = (src);				\
	int __gkn_err = 0;						\
									\
	__uaccess_enable_tco_async();					\
	__raw_get_mem("ldr", *((type *)(__gkn_dst)),			\
		      (__force type *)(__gkn_src), __gkn_err);		\
	__uaccess_disable_tco_async();					\
									\
	if (unlikely(__gkn_err))					\
		goto err_label;						\
} while (0)
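
/*
 * Illustrative sketch (not part of this header): __get_kernel_nofault() is
 * the building block behind copy_from_kernel_nofault() and friends; on a
 * fault it branches to the supplied label instead of oopsing. Names below
 * are hypothetical.
 *
 *	unsigned long val;
 *
 *	__get_kernel_nofault(&val, kaddr, unsigned long, fault);
 *	return val;
 * fault:
 *	return 0;
 */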

#define __put_mem_asm(store, reg, x, addr, err)				\
	asm volatile(							\
	"1:	" store "	" reg "1, [%2]\n"			\
	"2:\n"								\
	"	.section .fixup,\"ax\"\n"				\
	"	.align	2\n"						\
	"3:	mov	%w0, %3\n"					\
	"	b	2b\n"						\
	"	.previous\n"						\
	_ASM_EXTABLE(1b, 3b)						\
	: "+r" (err)							\
	: "r" (x), "r" (addr), "i" (-EFAULT))

#define __raw_put_mem(str, x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __pu_val = (x);				\
	switch (sizeof(*(ptr))) {					\
	case 1:								\
		__put_mem_asm(str "b", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 2:								\
		__put_mem_asm(str "h", "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 4:								\
		__put_mem_asm(str, "%w", __pu_val, (ptr), (err));	\
		break;							\
	case 8:								\
		__put_mem_asm(str, "%x", __pu_val, (ptr), (err));	\
		break;							\
	default:							\
		BUILD_BUG();						\
	}								\
} while (0)

/*
 * We must not call into the scheduler between uaccess_enable_not_uao() and
 * uaccess_disable_not_uao(). As `x` and `ptr` could contain blocking functions,
 * we must evaluate these outside of the critical section.
 */
#define __raw_put_user(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__rpu_ptr = (ptr);			\
	__typeof__(*(ptr)) __rpu_val = (x);				\
	__chk_user_ptr(__rpu_ptr);					\
									\
	uaccess_enable_not_uao();					\
	__raw_put_mem("sttr", __rpu_val, __rpu_ptr, err);		\
	uaccess_disable_not_uao();					\
} while (0)

#define __put_user_error(x, ptr, err)					\
do {									\
	__typeof__(*(ptr)) __user *__p = (ptr);				\
	might_fault();							\
	if (access_ok(__p, sizeof(*__p))) {				\
		__p = uaccess_mask_ptr(__p);				\
		__raw_put_user((x), __p, (err));			\
	} else {							\
		(err) = -EFAULT;					\
	}								\
} while (0)

#define __put_user(x, ptr)						\
({									\
	int __pu_err = 0;						\
	__put_user_error((x), (ptr), __pu_err);				\
	__pu_err;							\
})

#define put_user	__put_user
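
/*
 * Illustrative sketch (not part of this header): put_user() mirrors
 * get_user() above and, in this tree, likewise performs the access_ok()
 * check itself. Names below are hypothetical.
 *
 *	if (put_user(result, uptr))	// uptr is a u32 __user *, result a u32
 *		return -EFAULT;
 */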

/*
 * We must not call into the scheduler between __uaccess_enable_tco_async() and
 * __uaccess_disable_tco_async(). As `dst` and `src` may contain blocking
 * functions, we must evaluate these outside of the critical section.
 */
#define __put_kernel_nofault(dst, src, type, err_label)			\
do {									\
	__typeof__(dst) __pkn_dst = (dst);				\
	__typeof__(src) __pkn_src = (src);				\
	int __pkn_err = 0;						\
									\
	__uaccess_enable_tco_async();					\
	__raw_put_mem("str", *((type *)(__pkn_src)),			\
		      (__force type *)(__pkn_dst), __pkn_err);		\
	__uaccess_disable_tco_async();					\
									\
	if (unlikely(__pkn_err))					\
		goto err_label;						\
} while (0)

extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
#define raw_copy_from_user(to, from, n)					\
({									\
	unsigned long __acfu_ret;					\
	uaccess_enable_not_uao();					\
	__acfu_ret = __arch_copy_from_user((to),			\
				__uaccess_mask_ptr(from), (n));		\
	uaccess_disable_not_uao();					\
	__acfu_ret;							\
})

extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
#define raw_copy_to_user(to, from, n)					\
({									\
	unsigned long __actu_ret;					\
	uaccess_enable_not_uao();					\
	__actu_ret = __arch_copy_to_user(__uaccess_mask_ptr(to),	\
				(from), (n));				\
	uaccess_disable_not_uao();					\
	__actu_ret;							\
})

extern unsigned long __must_check __arch_copy_in_user(void __user *to, const void __user *from, unsigned long n);
#define raw_copy_in_user(to, from, n)					\
({									\
	unsigned long __aciu_ret;					\
	uaccess_enable_not_uao();					\
	__aciu_ret = __arch_copy_in_user(__uaccess_mask_ptr(to),	\
				__uaccess_mask_ptr(from), (n));		\
	uaccess_disable_not_uao();					\
	__aciu_ret;							\
})

#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER
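
/*
 * Illustrative sketch (not part of this header): raw_copy_{from,to}_user()
 * are consumed by the generic copy_from_user()/copy_to_user() wrappers in
 * <linux/uaccess.h>, which add the access_ok() check and return the number
 * of bytes that could not be copied. Names below are hypothetical.
 *
 *	struct foo karg;
 *
 *	if (copy_from_user(&karg, uarg, sizeof(karg)))
 *		return -EFAULT;
 */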

extern unsigned long __must_check __arch_clear_user(void __user *to, unsigned long n);
static inline unsigned long __must_check __clear_user(void __user *to, unsigned long n)
{
	if (access_ok(to, n)) {
		uaccess_enable_not_uao();
		n = __arch_clear_user(__uaccess_mask_ptr(to), n);
		uaccess_disable_not_uao();
	}
	return n;
}
#define clear_user	__clear_user

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);
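
/*
 * Illustrative sketch (not part of this header): strncpy_from_user() returns
 * the length of the copied string (excluding the NUL) on success and a
 * negative error on fault, while clear_user() returns the number of bytes
 * that could not be zeroed. Names below are hypothetical.
 *
 *	char name[32];
 *	long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *	if (len < 0)
 *		return len;		// e.g. -EFAULT on fault
 */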

#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
struct page;
void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
extern unsigned long __must_check __copy_user_flushcache(void *to, const void __user *from, unsigned long n);

static inline int __copy_from_user_flushcache(void *dst, const void __user *src, unsigned size)
{
	kasan_check_write(dst, size);
	return __copy_user_flushcache(dst, __uaccess_mask_ptr(src), size);
}
#endif

#endif	/* __ASM_UACCESS_H */