/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_UACCESS_H
#define _ASM_X86_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/compiler.h>
#include <linux/kasan-checks.h>
#include <linux/string.h>
#include <asm/asm.h>
#include <asm/page.h>
#include <asm/smap.h>
#include <asm/extable.h>

/*
 * Test whether a block of memory is a valid user space address.
 * Returns 0 if the range is valid, nonzero otherwise.
 */
static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, unsigned long limit)
{
	/*
	 * If we have used "sizeof()" for the size,
	 * we know it won't overflow the limit (but
	 * it might overflow the 'addr', so it's
	 * important to subtract the size from the
	 * limit, not add it to the address).
	 */
	if (__builtin_constant_p(size))
		return unlikely(addr > limit - size);

	/* Arbitrary sizes? Be careful about overflow */
	addr += size;
	if (unlikely(addr < size))
		return true;
	return unlikely(addr > limit);
}
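
/*
 * Worked example of the comment above (illustrative only): with
 * addr = 0xffffffffffffff00 and size = 0x200, the naive check
 * "addr + size > limit" would wrap to 0x100 and wrongly accept the
 * range, while "limit - size" cannot wrap (a sizeof()-derived size
 * never exceeds the limit), so "addr > limit - size" correctly
 * rejects it:
 *
 *	__chk_range_not_ok(0xffffffffffffff00, 0x200, TASK_SIZE_MAX)
 *		-> true (range rejected)
 */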

#define __range_not_ok(addr, size, limit)				\
({									\
	__chk_user_ptr(addr);						\
	__chk_range_not_ok((unsigned long __force)(addr), size, limit); \
})

#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline bool pagefault_disabled(void);
# define WARN_ON_IN_IRQ()	\
	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
#else
# define WARN_ON_IN_IRQ()
#endif

/**
 * access_ok - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Note that, depending on architecture, this function probably just
 * checks that the pointer is in the user space range - after calling
 * this function, memory access functions may still return -EFAULT.
 *
 * Return: true (nonzero) if the memory block may be valid, false (zero)
 * if it is definitely invalid.
 */
#define access_ok(addr, size)						\
({									\
	WARN_ON_IN_IRQ();						\
	likely(!__range_not_ok(addr, size, TASK_SIZE_MAX));		\
})
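
/*
 * Usage sketch (hypothetical caller, not part of this header; uptr
 * and arg are made-up names):
 *
 *	int __user *uptr = (int __user *)arg;
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	... uptr may now be handed to __get_user()/__put_user() ...
 */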

extern int __get_user_1(void);
extern int __get_user_2(void);
extern int __get_user_4(void);
extern int __get_user_8(void);
extern int __get_user_nocheck_1(void);
extern int __get_user_nocheck_2(void);
extern int __get_user_nocheck_4(void);
extern int __get_user_nocheck_8(void);
extern int __get_user_bad(void);

#define __uaccess_begin() stac()
#define __uaccess_end()   clac()
#define __uaccess_begin_nospec()	\
({					\
	stac();				\
	barrier_nospec();		\
})

/*
 * This is the smallest unsigned integer type that can fit a value
 * (up to 'long long')
 */
#define __inttype(x) __typeof__(		\
	__typefits(x,char,			\
	  __typefits(x,short,			\
	    __typefits(x,int,			\
	      __typefits(x,long,0ULL)))))

#define __typefits(x,type,not) \
	__builtin_choose_expr(sizeof(x)<=sizeof(type),(unsigned type)0,not)
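
/*
 * Informal expansion examples for the pair of macros above:
 * __typefits() walks char -> short -> int -> long and falls back to
 * 0ULL, so __inttype() yields the narrowest unsigned type at least
 * as wide as its argument, e.g.
 *
 *	__inttype(*(u16 __user *)p)	-> unsigned short
 *	__inttype(*(u32 __user *)p)	-> unsigned int
 *
 * which lets the get_user() machinery hold any user value in a
 * single suitably sized integer temporary.
 */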

/*
 * This is used for both get_user() and __get_user() to expand to
 * the proper special function call that has odd calling conventions
 * due to returning both a value and an error, and that depends on
 * the size of the pointer passed in.
 *
 * Careful: we have to cast the result to the type of the pointer
 * for sign reasons.
 *
 * The use of _ASM_DX as the register specifier is a bit of a
 * simplification, as gcc only cares about it as the starting point
 * and not size: for a 64-bit value it will use %ecx:%edx on 32 bits
 * (%ecx being the next register in gcc's x86 register sequence), and
 * %rdx on 64 bits.
 *
 * Clang/LLVM cares about the size of the register, but still wants
 * the base register for something that ends up being a pair.
 */
#define do_get_user_call(fn,x,ptr)					\
({									\
	int __ret_gu;							\
	register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX);		\
	__chk_user_ptr(ptr);						\
	asm volatile("call __" #fn "_%P4"				\
		     : "=a" (__ret_gu), "=r" (__val_gu),		\
			ASM_CALL_CONSTRAINT				\
		     : "0" (ptr), "i" (sizeof(*(ptr))));		\
	(x) = (__force __typeof__(*(ptr))) __val_gu;			\
	__builtin_expect(__ret_gu, 0);					\
})
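
/*
 * Informal expansion (a sketch of what the compiler emits):
 * get_user(x, (u32 __user *)p) becomes roughly
 *
 *	call __get_user_4
 *
 * with the pointer passed in %eax/%rax, the error code returned in
 * %eax/%rax and the value in %edx/%rdx; the "%P4" operand prints
 * the sizeof(*(ptr)) constant that selects the stub.
 */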

/**
 * get_user - Get a simple variable from user space.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define get_user(x,ptr) ({ might_fault(); do_get_user_call(get_user,x,ptr); })
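
/*
 * Usage sketch (hypothetical syscall body; val and arg are made-up
 * names):
 *
 *	int val;
 *
 *	if (get_user(val, (int __user *)arg))
 *		return -EFAULT;
 */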

/**
 * __get_user - Get a simple variable from user space, with less checking.
 * @x: Variable to store result.
 * @ptr: Source address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple variable from user space to kernel
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and the result of
 * dereferencing @ptr must be assignable to @x without a cast.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 * On error, the variable @x is set to zero.
 */
#define __get_user(x,ptr) do_get_user_call(get_user_nocheck,x,ptr)
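
/*
 * Usage sketch (hypothetical; uptr and val are made-up names, and
 * the access_ok() call is the caller's obligation noted above):
 *
 *	if (!access_ok(uptr, sizeof(*uptr)))
 *		return -EFAULT;
 *	if (__get_user(val, uptr))
 *		return -EFAULT;
 */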


#ifdef CONFIG_X86_32
#define __put_user_goto_u64(x, addr, label)			\
	asm_volatile_goto("\n"					\
		     "1: movl %%eax,0(%1)\n"			\
		     "2: movl %%edx,4(%1)\n"			\
		     _ASM_EXTABLE_UA(1b, %l2)			\
		     _ASM_EXTABLE_UA(2b, %l2)			\
		     : : "A" (x), "r" (addr)			\
		     : : label)

#else
#define __put_user_goto_u64(x, ptr, label) \
	__put_user_goto(x, ptr, "q", "er", label)
#endif

extern void __put_user_bad(void);

/*
 * Strange magic calling convention: pointer in %ecx,
 * value in %eax(:%edx), return value in %ecx. clobbers %rbx
 */
extern void __put_user_1(void);
extern void __put_user_2(void);
extern void __put_user_4(void);
extern void __put_user_8(void);
extern void __put_user_nocheck_1(void);
extern void __put_user_nocheck_2(void);
extern void __put_user_nocheck_4(void);
extern void __put_user_nocheck_8(void);

/*
 * ptr must be evaluated and assigned to the temporary __ptr_pu before
 * the assignment of x to __val_pu, to avoid any function calls
 * involved in the ptr expression (possibly implicitly generated due
 * to KASAN) from clobbering %ax.
 */
#define do_put_user_call(fn,x,ptr)					\
({									\
	int __ret_pu;							\
	void __user *__ptr_pu;						\
	register __typeof__(*(ptr)) __val_pu asm("%"_ASM_AX);		\
	__chk_user_ptr(ptr);						\
	__ptr_pu = (ptr);						\
	__val_pu = (x);							\
	asm volatile("call __" #fn "_%P[size]"				\
		     : "=c" (__ret_pu),					\
			ASM_CALL_CONSTRAINT				\
		     : "0" (__ptr_pu),					\
		       "r" (__val_pu),					\
		       [size] "i" (sizeof(*(ptr)))			\
		     :"ebx");						\
	__builtin_expect(__ret_pu, 0);					\
})

/**
 * put_user - Write a simple value into user space.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define put_user(x, ptr) ({ might_fault(); do_put_user_call(put_user,x,ptr); })
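
/*
 * Usage sketch (hypothetical driver returning a counter to user
 * space; count and uarg are made-up names):
 *
 *	if (put_user(count, (u32 __user *)uarg))
 *		return -EFAULT;
 */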

/**
 * __put_user - Write a simple value into user space, with less checking.
 * @x: Value to copy to user space.
 * @ptr: Destination address, in user space.
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * This macro copies a single simple value from kernel space to user
 * space. It supports simple types like char and int, but not larger
 * data types like structures or arrays.
 *
 * @ptr must have pointer-to-simple-variable type, and @x must be assignable
 * to the result of dereferencing @ptr.
 *
 * Caller must check the pointer with access_ok() before calling this
 * function.
 *
 * Return: zero on success, or -EFAULT on error.
 */
#define __put_user(x, ptr) do_put_user_call(put_user_nocheck,x,ptr)

#define __put_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__put_user_goto(x, ptr, "b", "iq", label);		\
		break;							\
	case 2:								\
		__put_user_goto(x, ptr, "w", "ir", label);		\
		break;							\
	case 4:								\
		__put_user_goto(x, ptr, "l", "ir", label);		\
		break;							\
	case 8:								\
		__put_user_goto_u64(x, ptr, label);			\
		break;							\
	default:							\
		__put_user_bad();					\
	}								\
} while (0)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, label) do {				\
	unsigned int __gu_low, __gu_high;				\
	const unsigned int __user *__gu_ptr;				\
	__gu_ptr = (const void __user *)(ptr);				\
	__get_user_asm(__gu_low, __gu_ptr, "l", "=r", label);		\
	__get_user_asm(__gu_high, __gu_ptr+1, "l", "=r", label);	\
	(x) = ((unsigned long long)__gu_high << 32) | __gu_low;	\
} while (0)
#else
#define __get_user_asm_u64(x, ptr, label)				\
	__get_user_asm(x, ptr, "q", "=r", label)
#endif

#define __get_user_size(x, ptr, size, label)				\
do {									\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:	{							\
		unsigned char x_u8__;					\
		__get_user_asm(x_u8__, ptr, "b", "=q", label);		\
		(x) = x_u8__;						\
		break;							\
	}								\
	case 2:								\
		__get_user_asm(x, ptr, "w", "=r", label);		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, "l", "=r", label);		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, label);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		     "1: mov"itype" %[umem],%[output]\n"		\
		     _ASM_EXTABLE_UA(1b, %l2)				\
		     : [output] ltype(x)				\
		     : [umem] "m" (__m(addr))				\
		     : : label)

#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#ifdef CONFIG_X86_32
#define __get_user_asm_u64(x, ptr, retval)				\
({									\
	__typeof__(ptr) __ptr = (ptr);					\
	asm volatile("\n"						\
		     "1: movl %[lowbits],%%eax\n"			\
		     "2: movl %[highbits],%%edx\n"			\
		     "3:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "4: mov %[efault],%[errout]\n"			\
		     "   xorl %%eax,%%eax\n"				\
		     "   xorl %%edx,%%edx\n"				\
		     "   jmp 3b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 4b)				\
		     _ASM_EXTABLE_UA(2b, 4b)				\
		     : [errout] "=r" (retval),				\
		       [output] "=&A"(x)				\
		     : [lowbits] "m" (__m(__ptr)),			\
		       [highbits] "m" __m(((u32 __user *)(__ptr)) + 1),	\
		       [efault] "i" (-EFAULT), "0" (retval));		\
})

#else
#define __get_user_asm_u64(x, ptr, retval) \
	 __get_user_asm(x, ptr, retval, "q", "=r")
#endif

#define __get_user_size(x, ptr, size, retval)				\
do {									\
	unsigned char x_u8__;						\
									\
	retval = 0;							\
	__chk_user_ptr(ptr);						\
	switch (size) {							\
	case 1:								\
		__get_user_asm(x_u8__, ptr, retval, "b", "=q");		\
		(x) = x_u8__;						\
		break;							\
	case 2:								\
		__get_user_asm(x, ptr, retval, "w", "=r");		\
		break;							\
	case 4:								\
		__get_user_asm(x, ptr, retval, "l", "=r");		\
		break;							\
	case 8:								\
		__get_user_asm_u64(x, ptr, retval);			\
		break;							\
	default:							\
		(x) = __get_user_bad();					\
	}								\
} while (0)

#define __get_user_asm(x, addr, err, itype, ltype)			\
	asm volatile("\n"						\
		     "1: mov"itype" %[umem],%[output]\n"		\
		     "2:\n"						\
		     ".section .fixup,\"ax\"\n"				\
		     "3: mov %[efault],%[errout]\n"			\
		     "   xorl %k[output],%k[output]\n"			\
		     "   jmp 2b\n"					\
		     ".previous\n"					\
		     _ASM_EXTABLE_UA(1b, 3b)				\
		     : [errout] "=r" (err),				\
		       [output] ltype(x)				\
		     : [umem] "m" (__m(addr)),				\
		       [efault] "i" (-EFAULT), "0" (err))

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

/*
 * Tell gcc we read from memory instead of writing: this is because
 * we do not write to any memory gcc knows about, so there are no
 * aliasing issues.
 */
#define __put_user_goto(x, addr, itype, ltype, label)			\
	asm_volatile_goto("\n"						\
		"1: mov"itype" %0,%1\n"					\
		_ASM_EXTABLE_UA(1b, %l2)				\
		: : ltype(x), "m" (__m(addr))				\
		: : label)

extern unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
extern __must_check long
strncpy_from_user(char *dst, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);

unsigned long __must_check clear_user(void __user *mem, unsigned long len);
unsigned long __must_check __clear_user(void __user *mem, unsigned long len);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned len);
#define copy_mc_to_kernel copy_mc_to_kernel

unsigned long __must_check
copy_mc_to_user(void *to, const void *from, unsigned len);
#endif

/*
 * movsl can be slow when source and dest are not both 8-byte aligned
 */
#ifdef CONFIG_X86_INTEL_USERCOPY
extern struct movsl_mask {
	int mask;
} ____cacheline_aligned_in_smp movsl_mask;
#endif

#define ARCH_HAS_NOCACHE_UACCESS 1

#ifdef CONFIG_X86_32
# include <asm/uaccess_32.h>
#else
# include <asm/uaccess_64.h>
#endif

/*
 * The "unsafe" user accesses aren't really "unsafe", but the naming
 * is a big fat warning: you have to not only do the access_ok()
 * checking before using them, but you have to surround them with the
 * user_access_begin/end() pair.
 */
static __must_check __always_inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr,len)))
		return 0;
	__uaccess_begin_nospec();
	return 1;
}
#define user_access_begin(a,b)	user_access_begin(a,b)
#define user_access_end()	__uaccess_end()

#define user_access_save()	smap_save()
#define user_access_restore(x)	smap_restore(x)
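
/*
 * Usage sketch for the unsafe accessors (hypothetical; lo, hi and
 * uptr are made-up names). Note the begin/end pairing on both the
 * success and failure paths:
 *
 *	u32 lo, hi;
 *
 *	if (!user_access_begin(uptr, 2 * sizeof(u32)))
 *		return -EFAULT;
 *	unsafe_get_user(lo, &uptr[0], Efault);
 *	unsafe_get_user(hi, &uptr[1], Efault);
 *	user_access_end();
 *	return 0;
 * Efault:
 *	user_access_end();
 *	return -EFAULT;
 */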

#define unsafe_put_user(x, ptr, label)	\
	__put_user_size((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), err_label);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
} while (0)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define unsafe_get_user(x, ptr, err_label)				\
do {									\
	int __gu_err;							\
	__inttype(*(ptr)) __gu_val;					\
	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
	if (unlikely(__gu_err)) goto err_label;				\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

/*
 * We want the unsafe accessors to always be inlined and use
 * the error labels - thus the macro games.
 */
#define unsafe_copy_loop(dst, src, len, type, label)			\
	while (len >= sizeof(type)) {					\
		unsafe_put_user(*(type *)(src),(type __user *)(dst),label);	\
		dst += sizeof(type);					\
		src += sizeof(type);					\
		len -= sizeof(type);					\
	}

#define unsafe_copy_to_user(_dst,_src,_len,label)			\
do {									\
	char __user *__ucu_dst = (_dst);				\
	const char *__ucu_src = (_src);					\
	size_t __ucu_len = (_len);					\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u64, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u32, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u16, label);	\
	unsafe_copy_loop(__ucu_dst, __ucu_src, __ucu_len, u8, label);	\
} while (0)
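
/*
 * Usage sketch (hypothetical; ubuf, kbuf and len are made-up names,
 * and the call must sit inside a user_access_begin()/user_access_end()
 * section like the one sketched above):
 *
 *	unsafe_copy_to_user(ubuf, kbuf, len, Efault);
 *
 * The u64/u32/u16/u8 loops peel the copy into the widest stores that
 * still fit the remaining length.
 */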

#define HAVE_GET_KERNEL_NOFAULT

#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), err_label)
#else // !CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size(*((type *)(dst)), (__force type __user *)(src),	\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size(*((type *)(src)), (__force type __user *)(dst),	\
			sizeof(type), err_label)
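
/*
 * These two macros back the generic get_kernel_nofault() and
 * copy_from_kernel_nofault() helpers. Usage sketch (hypothetical;
 * insn and ip are made-up names):
 *
 *	unsigned long insn;
 *
 *	__get_kernel_nofault(&insn, (unsigned long *)ip, unsigned long,
 *			     Efault);
 */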

#endif /* _ASM_X86_UACCESS_H */