Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/powerpc/include/asm/uaccess.h (all lines from commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H

#include <asm/ppc_asm.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
#include <asm/kup.h>

#ifdef __powerpc64__
/* We use TASK_SIZE_USER64 as TASK_SIZE is not constant */
#define TASK_SIZE_MAX		TASK_SIZE_USER64
#else
#define TASK_SIZE_MAX		TASK_SIZE
#endif

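/*
 * The range check below is written as "size <= TASK_SIZE_MAX - addr"
 * rather than "addr + size <= TASK_SIZE_MAX" so that a huge size cannot
 * wrap the sum around and slip past the check.
 */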
static inline bool __access_ok(unsigned long addr, unsigned long size)
{
	return addr < TASK_SIZE_MAX && size <= TASK_SIZE_MAX - addr;
}

#define access_ok(addr, size)		\
	(__chk_user_ptr(addr),		\
	 __access_ok((unsigned long)(addr), (size)))

/*
 * These are the main single-value transfer routines.  They automatically
 * use the right size if we just have the right pointer type.
 *
 * This gets kind of ugly. We want to return _two_ values in "get_user()"
 * and yet we don't want to do any pointers, because that is too much
 * of a performance impact. Thus we have a few rather ugly macros here,
 * and hide all the ugliness from the user.
 *
 * The "__xxx" versions of the user access functions are versions that
 * do not verify the address space, that must have been done previously
 * with a separate "access_ok()" call (this is used when we do multiple
 * accesses to the same area of user memory).
 *
 * As we use the same address space for kernel and user data on the
 * PowerPC, we can just do these as direct assignments.  (Of course, the
 * exception handling means that it's no longer "just"...)
 *
 */
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), true)
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __put_user_goto(x, ptr, label) \
	__put_user_nocheck_goto((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), label)

#define __get_user_allowed(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)), false)

#define __get_user_inatomic(x, ptr) \
	__get_user_nosleep((x), (ptr), sizeof(*(ptr)))
#define __put_user_inatomic(x, ptr) \
	__put_user_nosleep((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))

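/*
 * Illustrative sketch, not part of the original header: a typical caller
 * of the routines above.  get_user()/put_user() perform the access_ok()
 * range check themselves; the "__" variants rely on the caller having
 * done a single access_ok() for the whole area, as described in the
 * comment above.  The struct and function names here are hypothetical.
 */
struct uaccess_example_pair {
	u32 first;
	u32 second;
};

static inline long uaccess_example_read_pair(struct uaccess_example_pair *dst,
		const struct uaccess_example_pair __user *src)
{
	/* One range check, then two fixed-size unchecked reads. */
	if (!access_ok(src, sizeof(*src)))
		return -EFAULT;
	if (__get_user(dst->first, &src->first))
		return -EFAULT;
	if (__get_user(dst->second, &src->second))
		return -EFAULT;
	return 0;
}
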
#ifdef CONFIG_PPC64

#define ___get_user_instr(gu_op, dest, ptr)				\
({									\
	long __gui_ret = 0;						\
	unsigned long __gui_ptr = (unsigned long)ptr;			\
	struct ppc_inst __gui_inst;					\
	unsigned int __prefix, __suffix;				\
	__gui_ret = gu_op(__prefix, (unsigned int __user *)__gui_ptr);	\
	if (__gui_ret == 0) {						\
		if ((__prefix >> 26) == OP_PREFIX) {			\
			__gui_ret = gu_op(__suffix,			\
				(unsigned int __user *)__gui_ptr + 1);	\
			__gui_inst = ppc_inst_prefix(__prefix,		\
						     __suffix);		\
		} else {						\
			__gui_inst = ppc_inst(__prefix);		\
		}							\
		if (__gui_ret == 0)					\
			(dest) = __gui_inst;				\
	}								\
	__gui_ret;							\
})

#define get_user_instr(x, ptr) \
	___get_user_instr(get_user, x, ptr)

#define __get_user_instr(x, ptr) \
	___get_user_instr(__get_user, x, ptr)

#define __get_user_instr_inatomic(x, ptr) \
	___get_user_instr(__get_user_inatomic, x, ptr)

#else /* !CONFIG_PPC64 */
#define get_user_instr(x, ptr) \
	get_user((x).val, (u32 __user *)(ptr))

#define __get_user_instr(x, ptr) \
	__get_user_nocheck((x).val, (u32 __user *)(ptr), sizeof(u32), true)

#define __get_user_instr_inatomic(x, ptr) \
	__get_user_nosleep((x).val, (u32 __user *)(ptr), sizeof(u32))

#endif /* CONFIG_PPC64 */

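/*
 * Illustrative sketch, not part of the original header: fetching the
 * (possibly prefixed) instruction a task trapped on.  On PPC64 the
 * macro above reads the suffix word and reassembles a prefixed
 * instruction when the first word carries OP_PREFIX; on PPC32 it is a
 * single 32-bit read.  The function name is hypothetical and assumes
 * struct ppc_inst (from <asm/inst.h>) and struct pt_regs are visible
 * to the caller.
 */
static inline int uaccess_example_fetch_user_instr(struct pt_regs *regs,
		struct ppc_inst *instr)
{
	if (get_user_instr(*instr, (void __user *)regs->nip))
		return -EFAULT;
	return 0;
}
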
extern long __put_user_bad(void);

#define __put_user_size_allowed(x, ptr, size, retval)		\
do {								\
	__label__ __pu_failed;					\
								\
	retval = 0;						\
	__put_user_size_goto(x, ptr, size, __pu_failed);	\
	break;							\
								\
__pu_failed:							\
	retval = -EFAULT;					\
} while (0)

#define __put_user_size(x, ptr, size, retval)			\
do {								\
	allow_write_to_user(ptr, size);				\
	__put_user_size_allowed(x, ptr, size, retval);		\
	prevent_write_to_user(ptr, size);			\
} while (0)

#define __put_user_nocheck(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	if (!is_kernel_addr((unsigned long)__pu_addr))		\
		might_fault();					\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err);	\
								\
	__pu_err;						\
})

#define __put_user_check(x, ptr, size)					\
({									\
	long __pu_err = -EFAULT;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);			\
	__typeof__(*(ptr)) __pu_val = (x);				\
	__typeof__(size) __pu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(__pu_addr, __pu_size))				\
		__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
									\
	__pu_err;							\
})

#define __put_user_nosleep(x, ptr, size)			\
({								\
	long __pu_err;						\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__typeof__(*(ptr)) __pu_val = (x);			\
	__typeof__(size) __pu_size = (size);			\
								\
	__chk_user_ptr(__pu_addr);				\
	__put_user_size(__pu_val, __pu_addr, __pu_size, __pu_err); \
								\
	__pu_err;						\
})


/*
 * We don't tell gcc that we are accessing memory, but this is OK
 * because we do not write to any memory gcc knows about, so there
 * are no aliasing issues.
 */
#define __put_user_asm_goto(x, addr, label, op)			\
	asm_volatile_goto(					\
		"1:	" op "%U1%X1 %0,%1	# put_user\n"	\
		EX_TABLE(1b, %l2)				\
		:						\
		: "r" (x), "m"UPD_CONSTR (*addr)		\
		:						\
		: label)
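/*
 * The store at local label "1:" above gets an exception table entry via
 * EX_TABLE(1b, %l2); if the store faults, the exception handler branches
 * straight to the C label passed in as "label", so no error value needs
 * to be threaded back through a register.
 */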

#ifdef __powerpc64__
#define __put_user_asm2_goto(x, ptr, label)			\
	__put_user_asm_goto(x, ptr, label, "std")
#else /* __powerpc64__ */
#define __put_user_asm2_goto(x, addr, label)			\
	asm_volatile_goto(					\
		"1:	stw%X1 %0, %1\n"			\
		"2:	stw%X1 %L0, %L1\n"			\
		EX_TABLE(1b, %l2)				\
		EX_TABLE(2b, %l2)				\
		:						\
		: "r" (x), "m" (*addr)				\
		:						\
		: label)
#endif /* __powerpc64__ */

#define __put_user_size_goto(x, ptr, size, label)		\
do {								\
	switch (size) {						\
	case 1: __put_user_asm_goto(x, ptr, label, "stb"); break;	\
	case 2: __put_user_asm_goto(x, ptr, label, "sth"); break;	\
	case 4: __put_user_asm_goto(x, ptr, label, "stw"); break;	\
	case 8: __put_user_asm2_goto(x, ptr, label); break;	\
	default: __put_user_bad();				\
	}							\
} while (0)

#define __put_user_nocheck_goto(x, ptr, size, label)		\
do {								\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);		\
	__chk_user_ptr(ptr);					\
	__put_user_size_goto((x), __pu_addr, (size), label);	\
} while (0)


extern long __get_user_bad(void);

/*
 * This does an atomic 128-bit (16-byte) aligned load from userspace.
 * It is up to the caller to do enable_kernel_vmx() before calling!
 */
#define __get_user_atomic_128_aligned(kaddr, uaddr, err)		\
	__asm__ __volatile__(				\
		".machine push\n"			\
		".machine altivec\n"			\
		"1:	lvx  0,0,%1	# get user\n"	\
		" 	stvx 0,0,%2	# put kernel\n"	\
		".machine pop\n"			\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err)			\
		: "b" (uaddr), "b" (kaddr), "i" (-EFAULT), "0" (err))

#define __get_user_asm(x, addr, err, op)		\
	__asm__ __volatile__(				\
		"1:	"op"%U2%X2 %1, %2	# get_user\n"	\
		"2:\n"					\
		".section .fixup,\"ax\"\n"		\
		"3:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	b 2b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 3b)			\
		: "=r" (err), "=r" (x)			\
		: "m"UPD_CONSTR (*addr), "i" (-EFAULT), "0" (err))

#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err)			\
	__get_user_asm(x, addr, err, "ld")
#else /* __powerpc64__ */
#define __get_user_asm2(x, addr, err)			\
	__asm__ __volatile__(				\
		"1:	lwz%X2 %1, %2\n"			\
		"2:	lwz%X2 %L1, %L2\n"		\
		"3:\n"					\
		".section .fixup,\"ax\"\n"		\
		"4:	li %0,%3\n"			\
		"	li %1,0\n"			\
		"	li %1+1,0\n"			\
		"	b 3b\n"				\
		".previous\n"				\
		EX_TABLE(1b, 4b)			\
		EX_TABLE(2b, 4b)			\
		: "=r" (err), "=&r" (x)			\
		: "m" (*addr), "i" (-EFAULT), "0" (err))
#endif /* __powerpc64__ */

#define __get_user_size_allowed(x, ptr, size, retval)		\
do {								\
	retval = 0;						\
	__chk_user_ptr(ptr);					\
	if (size > sizeof(x))					\
		(x) = __get_user_bad();				\
	switch (size) {						\
	case 1: __get_user_asm(x, (u8 __user *)ptr, retval, "lbz"); break;	\
	case 2: __get_user_asm(x, (u16 __user *)ptr, retval, "lhz"); break;	\
	case 4: __get_user_asm(x, (u32 __user *)ptr, retval, "lwz"); break;	\
	case 8: __get_user_asm2(x, (u64 __user *)ptr, retval);  break;	\
	default: (x) = __get_user_bad();			\
	}							\
} while (0)

#define __get_user_size(x, ptr, size, retval)			\
do {								\
	allow_read_from_user(ptr, size);			\
	__get_user_size_allowed(x, ptr, size, retval);		\
	prevent_read_from_user(ptr, size);			\
} while (0)

/*
 * This is a type: either unsigned long, if the argument fits into
 * that type, or otherwise unsigned long long.
 */
#define __long_type(x) \
	__typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))

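/*
 * Illustrative expansion, not in the original source: __long_type() of a
 * u32 access is unsigned long on both 32-bit and 64-bit builds, while
 * __long_type() of a u64 access is unsigned long long on 32-bit builds
 * and unsigned long on 64-bit builds, so the __gu_val temporaries below
 * are always wide enough to hold the value being read.
 */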
#define __get_user_nocheck(x, ptr, size, do_allow)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	if (do_allow && !is_kernel_addr((unsigned long)__gu_addr)) \
		might_fault();					\
	barrier_nospec();					\
	if (do_allow)								\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err);	\
	else									\
		__get_user_size_allowed(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)					\
({									\
	long __gu_err = -EFAULT;					\
	__long_type(*(ptr)) __gu_val = 0;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);		\
	__typeof__(size) __gu_size = (size);				\
									\
	might_fault();							\
	if (access_ok(__gu_addr, __gu_size)) {				\
		barrier_nospec();					\
		__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	}								\
	(x) = (__force __typeof__(*(ptr)))__gu_val;				\
									\
	__gu_err;							\
})

#define __get_user_nosleep(x, ptr, size)			\
({								\
	long __gu_err;						\
	__long_type(*(ptr)) __gu_val;				\
	__typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__typeof__(size) __gu_size = (size);			\
								\
	__chk_user_ptr(__gu_addr);				\
	barrier_nospec();					\
	__get_user_size(__gu_val, __gu_addr, __gu_size, __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
								\
	__gu_err;						\
})


/* more complex routines */

extern unsigned long __copy_tofrom_user(void __user *to,
		const void __user *from, unsigned long size);

#ifdef CONFIG_ARCH_HAS_COPY_MC
unsigned long __must_check
copy_mc_generic(void *to, const void *from, unsigned long size);

static inline unsigned long __must_check
copy_mc_to_kernel(void *to, const void *from, unsigned long size)
{
	return copy_mc_generic(to, from, size);
}
#define copy_mc_to_kernel copy_mc_to_kernel

static inline unsigned long __must_check
copy_mc_to_user(void __user *to, const void *from, unsigned long n)
{
	if (likely(check_copy_size(from, n, true))) {
		if (access_ok(to, n)) {
			allow_write_to_user(to, n);
			n = copy_mc_generic((void *)to, from, n);
			prevent_write_to_user(to, n);
		}
	}

	return n;
}
#endif

#ifdef __powerpc64__
static inline unsigned long
raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
	unsigned long ret;

	barrier_nospec();
	allow_read_write_user(to, from, n);
	ret = __copy_tofrom_user(to, from, n);
	prevent_read_write_user(to, from, n);
	return ret;
}
#endif /* __powerpc64__ */

static inline unsigned long raw_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	unsigned long ret;
	if (__builtin_constant_p(n) && (n <= 8)) {
		ret = 1;

		switch (n) {
		case 1:
			barrier_nospec();
			__get_user_size(*(u8 *)to, from, 1, ret);
			break;
		case 2:
			barrier_nospec();
			__get_user_size(*(u16 *)to, from, 2, ret);
			break;
		case 4:
			barrier_nospec();
			__get_user_size(*(u32 *)to, from, 4, ret);
			break;
		case 8:
			barrier_nospec();
			__get_user_size(*(u64 *)to, from, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	barrier_nospec();
	allow_read_from_user(from, n);
	ret = __copy_tofrom_user((__force void __user *)to, from, n);
	prevent_read_from_user(from, n);
	return ret;
}

static inline unsigned long
raw_copy_to_user_allowed(void __user *to, const void *from, unsigned long n)
{
	if (__builtin_constant_p(n) && (n <= 8)) {
		unsigned long ret = 1;

		switch (n) {
		case 1:
			__put_user_size_allowed(*(u8 *)from, (u8 __user *)to, 1, ret);
			break;
		case 2:
			__put_user_size_allowed(*(u16 *)from, (u16 __user *)to, 2, ret);
			break;
		case 4:
			__put_user_size_allowed(*(u32 *)from, (u32 __user *)to, 4, ret);
			break;
		case 8:
			__put_user_size_allowed(*(u64 *)from, (u64 __user *)to, 8, ret);
			break;
		}
		if (ret == 0)
			return 0;
	}

	return __copy_tofrom_user(to, (__force const void __user *)from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	unsigned long ret;

	allow_write_to_user(to, n);
	ret = raw_copy_to_user_allowed(to, from, n);
	prevent_write_to_user(to, n);
	return ret;
}

unsigned long __arch_clear_user(void __user *addr, unsigned long size);

static inline unsigned long clear_user(void __user *addr, unsigned long size)
{
	unsigned long ret = size;
	might_fault();
	if (likely(access_ok(addr, size))) {
		allow_write_to_user(addr, size);
		ret = __arch_clear_user(addr, size);
		prevent_write_to_user(addr, size);
	}
	return ret;
}

static inline unsigned long __clear_user(void __user *addr, unsigned long size)
{
	return clear_user(addr, size);
}

extern long strncpy_from_user(char *dst, const char __user *src, long count);
extern __must_check long strnlen_user(const char __user *str, long n);

extern long __copy_from_user_flushcache(void *dst, const void __user *src,
		unsigned size);
extern void memcpy_page_flushcache(char *to, struct page *page, size_t offset,
			   size_t len);

static __must_check inline bool user_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_write_user((void __user *)ptr, ptr, len);
	return true;
}
#define user_access_begin	user_access_begin
#define user_access_end		prevent_current_access_user
#define user_access_save	prevent_user_access_return
#define user_access_restore	restore_user_access

static __must_check inline bool
user_read_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_read_from_user(ptr, len);
	return true;
}
#define user_read_access_begin	user_read_access_begin
#define user_read_access_end		prevent_current_read_from_user

static __must_check inline bool
user_write_access_begin(const void __user *ptr, size_t len)
{
	if (unlikely(!access_ok(ptr, len)))
		return false;

	might_fault();

	allow_write_to_user((void __user *)ptr, len);
	return true;
}
#define user_write_access_begin	user_write_access_begin
#define user_write_access_end		prevent_current_write_to_user

#define unsafe_op_wrap(op, err) do { if (unlikely(op)) goto err; } while (0)
#define unsafe_get_user(x, p, e) unsafe_op_wrap(__get_user_allowed(x, p), e)
#define unsafe_put_user(x, p, e) __put_user_goto(x, p, e)

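/*
 * Illustrative sketch, not part of the original header: the intended
 * user_write_access_begin()/unsafe_put_user()/user_write_access_end()
 * pattern.  One write window is opened for the whole destination, the
 * unsafe_ accessors jump to the error label on a fault, and the window
 * is closed again on every path.  The function name is hypothetical.
 */
static inline int uaccess_example_write_two_words(u32 __user *dst, u32 a, u32 b)
{
	if (!user_write_access_begin(dst, 2 * sizeof(u32)))
		return -EFAULT;
	unsafe_put_user(a, dst, efault);
	unsafe_put_user(b, dst + 1, efault);
	user_write_access_end();
	return 0;

efault:
	user_write_access_end();
	return -EFAULT;
}
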
#define unsafe_copy_to_user(d, s, l, e) \
do {									\
	u8 __user *_dst = (u8 __user *)(d);				\
	const u8 *_src = (const u8 *)(s);				\
	size_t _len = (l);						\
	int _i;								\
									\
	for (_i = 0; _i < (_len & ~(sizeof(long) - 1)); _i += sizeof(long))		\
		__put_user_goto(*(long*)(_src + _i), (long __user *)(_dst + _i), e);\
	if (IS_ENABLED(CONFIG_PPC64) && (_len & 4)) {			\
		__put_user_goto(*(u32*)(_src + _i), (u32 __user *)(_dst + _i), e);	\
		_i += 4;						\
	}								\
	if (_len & 2) {							\
		__put_user_goto(*(u16*)(_src + _i), (u16 __user *)(_dst + _i), e);	\
		_i += 2;						\
	}								\
	if (_len & 1) \
		__put_user_goto(*(u8*)(_src + _i), (u8 __user *)(_dst + _i), e);\
} while (0)

#define HAVE_GET_KERNEL_NOFAULT

#define __get_kernel_nofault(dst, src, type, err_label)			\
do {									\
	int __kr_err;							\
									\
	__get_user_size_allowed(*((type *)(dst)), (__force type __user *)(src),\
			sizeof(type), __kr_err);			\
	if (unlikely(__kr_err))						\
		goto err_label;						\
} while (0)

#define __put_kernel_nofault(dst, src, type, err_label)			\
	__put_user_size_goto(*((type *)(src)),				\
		(__force type __user *)(dst), sizeof(type), err_label)

#endif	/* _ARCH_POWERPC_UACCESS_H */