/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H

#include <asm/segment.h>
#include <asm/extable.h>

#define __addr_ok(addr) \
	((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)

/*
 * __access_ok: check whether an (addr, size) range is a valid user range.
 *
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * Compute the address of the last byte of the range (addr + size - 1,
 * or addr itself for a zero-size range); the range is OK if that
 * address did not wrap around and still lies below the address limit.
 */
#define __access_ok(addr, size) ({				\
	unsigned long __ao_a = (addr), __ao_b = (size);		\
	unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b;	\
	__ao_end >= __ao_a && __addr_ok(__ao_end); })
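
/*
 * A worked example of the carry check above (the values are
 * illustrative): with addr = 0xfffffffc and size = 8 on a 32-bit
 * CPU, the last-byte address addr + size - 1 wraps around to
 * 0x00000003, so __ao_end < __ao_a and the range is rejected even
 * though both endpoints would individually pass __addr_ok().
 */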

#define access_ok(addr, size)	\
	(__chk_user_ptr(addr),	\
	 __access_ok((unsigned long __force)(addr), (size)))

#define user_addr_max() (current_thread_info()->addr_limit.seg)

/*
 * These are the main single-value transfer routines; they
 * automatically use the right size if we just have the right
 * pointer type.
 *
 * As SuperH uses the same address space for kernel and user data, we
 * can just do these as direct assignments.
 *
 * Be careful not to
 * (a) re-use the arguments for side effects (sizeof is ok)
 * (b) require any knowledge of processes at this stage
 */
#define put_user(x, ptr)	__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr)	__get_user_check((x), (ptr), sizeof(*(ptr)))
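
/*
 * A minimal usage sketch (uptr, val and the surrounding function are
 * assumptions, not part of this header); both macros return 0 on
 * success and -EFAULT on a faulting user address:
 *
 *	int __user *uptr;
 *	int val;
 *
 *	if (get_user(val, uptr))
 *		return -EFAULT;
 *	if (put_user(val + 1, uptr))
 *		return -EFAULT;
 */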

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the caller has to do the
 * checks by hand with access_ok()).
 */
#define __put_user(x, ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
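
/*
 * A sketch of the intended pattern, reading two fields from the same
 * user structure (struct foo and ufoo are hypothetical): a single
 * access_ok() check covers both __get_user() calls.
 *
 *	struct foo { int a; int b; };
 *	struct foo __user *ufoo;
 *	int a, b;
 *
 *	if (!access_ok(ufoo, sizeof(*ufoo)))
 *		return -EFAULT;
 *	if (__get_user(a, &ufoo->a) || __get_user(b, &ufoo->b))
 *		return -EFAULT;
 */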

/*
 * The __large_struct cast below makes the compiler treat the location
 * handed to the extended asm in __get_user_size()/__put_user_size() as
 * a large memory object, so it cannot assume that only a single word
 * is accessed around the asm.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))

#define __get_user_nocheck(x, ptr, size)			\
({								\
	long __gu_err;						\
	unsigned long __gu_val;					\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	__chk_user_ptr(ptr);					\
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __get_user_check(x, ptr, size)				\
({								\
	long __gu_err = -EFAULT;				\
	unsigned long __gu_val = 0;				\
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr);	\
	if (likely(access_ok(__gu_addr, (size))))		\
		__get_user_size(__gu_val, __gu_addr, (size), __gu_err);	\
	(x) = (__force __typeof__(*(ptr)))__gu_val;		\
	__gu_err;						\
})

#define __put_user_nocheck(x, ptr, size)		\
({							\
	long __pu_err;					\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);	\
	__typeof__(*(ptr)) __pu_val = (x);		\
	__chk_user_ptr(ptr);				\
	__put_user_size(__pu_val, __pu_addr, (size), __pu_err);	\
	__pu_err;					\
})

#define __put_user_check(x, ptr, size)			\
({							\
	long __pu_err = -EFAULT;			\
	__typeof__(*(ptr)) __user *__pu_addr = (ptr);	\
	__typeof__(*(ptr)) __pu_val = (x);		\
	if (likely(access_ok(__pu_addr, (size))))	\
		__put_user_size(__pu_val, __pu_addr, (size),	\
				__pu_err);		\
	__pu_err;					\
})

#include <asm/uaccess_32.h>

extern long strncpy_from_user(char *dest, const char __user *src, long count);

extern __must_check long strnlen_user(const char __user *str, long n);
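
/*
 * Usage sketch (buf and ustr are assumptions, not part of this
 * header): strncpy_from_user() returns the length of the copied
 * string excluding the trailing NUL, or -EFAULT on a fault.
 *
 *	char buf[64];
 *	long len = strncpy_from_user(buf, ustr, sizeof(buf));
 *
 *	if (len < 0)
 *		return len;
 */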

/* Generic arbitrary sized copy. Returns the number of bytes NOT copied. */
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);

static __always_inline unsigned long __must_check
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return __copy_user(to, (__force void *)from, n);
}

static __always_inline unsigned long __must_check
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return __copy_user((__force void *)to, from, n);
}
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
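
/*
 * raw_copy_{from,to}_user() feed the generic copy_from_user() and
 * copy_to_user() wrappers in <linux/uaccess.h>; the INLINE_COPY_*
 * defines above select the inline versions of those wrappers. A
 * minimal sketch of the usual all-or-nothing error handling (kbuf,
 * ubuf and len are assumptions, not part of this header):
 *
 *	if (copy_from_user(kbuf, ubuf, len))
 *		return -EFAULT;
 */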

/*
 * Clear the area and return the number of bytes NOT cleared
 * (on success this is 0).
 */
__kernel_size_t __clear_user(void __user *addr, __kernel_size_t size);

#define clear_user(addr, n)					\
({								\
	void __user *__cl_addr = (addr);			\
	unsigned long __cl_size = (n);				\
								\
	if (__cl_size && access_ok(__cl_addr, __cl_size))	\
		__cl_size = __clear_user(__cl_addr, __cl_size);	\
								\
	__cl_size;						\
})
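
/*
 * Usage sketch (ubuf and len are assumptions): a non-zero return
 * value means that many trailing bytes could not be cleared.
 *
 *	if (clear_user(ubuf, len))
 *		return -EFAULT;
 */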

extern void *set_exception_table_vec(unsigned int vec, void *handler);

/*
 * SH exception event (EXPEVT/INTEVT) codes are spaced 0x20 apart, so
 * shifting an event code right by 5 yields its exception vector number.
 */
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
{
	return set_exception_table_vec(evt >> 5, handler);
}

/*
 * User/kernel copy helpers used by the unaligned access fixup code;
 * like __copy_user(), both return the number of bytes NOT copied.
 */
struct mem_access {
	unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
	unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
};
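
/*
 * A minimal sketch of a mem_access instance for plain user-space
 * accesses (the name user_mem_access is illustrative, not part of
 * this header):
 *
 *	static struct mem_access user_mem_access = {
 *		.from	= raw_copy_from_user,
 *		.to	= raw_copy_to_user,
 *	};
 */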

int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
			    struct mem_access *ma, int, unsigned long address);

#endif /* __ASM_SH_UACCESS_H */