^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef __ALPHA_UACCESS_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define __ALPHA_UACCESS_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * The fs value determines whether argument validity checking should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) * performed or not. If get_fs() == USER_DS, checking is performed, with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * get_fs() == KERNEL_DS, checking is bypassed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * Or at least it did once upon a time. Nowadays it is a mask that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * defines which bits of the address space are off limits. This is a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * wee bit faster than the above.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) * For historical reasons, these macros are grossly misnamed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16)
/*
 * Segment values are masks ANDed against addresses in __access_ok()
 * below.  KERNEL_DS (0) rejects nothing; USER_DS has every bit at and
 * above bit 42 set, so any address (or range end) with those bits set
 * fails the check.
 */
#define KERNEL_DS ((mm_segment_t) { 0UL })
#define USER_DS ((mm_segment_t) { -0x40000000000UL })

/* Per-thread address-limit accessors (grossly misnamed, see above). */
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))

/* True iff the current address limit imposes no restriction (KERNEL_DS). */
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) /*
 * Is an address valid? This does a straightforward calculation rather
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) * than tests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) * Address valid if:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) * - "addr" doesn't have any high-bits set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) * - AND "size" doesn't have any high-bits set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) * - AND "addr+size-(size != 0)" doesn't have any high-bits set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) * - OR we are in kernel mode.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) */
/*
 * __ao_end is the address of the range's last byte (or the start
 * address itself when size == 0, courtesy of the -!!size term).
 * OR-ing start, size and end together yields a value whose forbidden
 * high bits are set iff any of the three strays outside the segment,
 * so a single AND with the segment mask decides validity.
 */
#define __access_ok(addr, size) ({ \
	unsigned long __ao_a = (addr), __ao_b = (size); \
	unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
	(get_fs().seg & (__ao_a | __ao_b | __ao_end)) == 0; })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39)
/*
 * Checked form used by generic code: __chk_user_ptr() is a
 * static-analysis annotation check only (no runtime effect),
 * then the numeric range test above is applied.
 */
#define access_ok(addr, size) \
({ \
	__chk_user_ptr(addr); \
	__access_ok(((unsigned long)(addr)), (size)); \
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) * These are the main single-value transfer routines. They automatically
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) * use the right size if we just have the right pointer type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) * As the alpha uses the same address space for kernel and user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) * data, we can just do these as direct assignments. (Of course, the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) * exception handling means that it's no longer "just"...)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) * Careful to not
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) * (a) re-use the arguments for side effects (sizeof/typeof is ok)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) * (b) require any knowledge of processes at this stage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) */
/*
 * Checked single-value transfers: validate the user address first,
 * then dispatch on access size.  put_user() casts the value to the
 * pointee type so the right width is stored; get_user() writes the
 * fetched value through (x).  Both evaluate to 0 or -EFAULT.
 */
#define put_user(x, ptr) \
	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define get_user(x, ptr) \
	__get_user_check((x), (ptr), sizeof(*(ptr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) * The "__xxx" versions do not do address space checking, useful when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) * doing multiple accesses to the same area (the programmer has to do the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * checks by hand with "access_ok()")
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) */
/*
 * Unchecked variants: same dispatch, but the caller vouches for the
 * address (via a prior access_ok()).  Still protected by the
 * exception table, so a fault yields an error rather than an oops.
 */
#define __put_user(x, ptr) \
	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
#define __get_user(x, ptr) \
	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * The "lda %1, 2b-1b(%0)" bits are magic to get the assembler to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * encode the bits we need for resolving the exception. See the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * more extensive comments with fixup_inline_exception below for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * more information.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) */
/*
 * Emit one exception-table entry for the instruction at "label":
 *  - the .long is the faulting instruction's address, stored
 *    self-relative so the table needs no relocation;
 *  - the lda is never executed: its machine encoding packs the fixup
 *    info — destination register "res", error register "err", and the
 *    signed displacement "cont"-"label" telling the handler where to
 *    resume.  See fixup_inline_exception for how it is decoded.
 */
#define EXC(label,cont,res,err) \
	".section __ex_table,\"a\"\n" \
	"	.long "#label"-.\n" \
	"	lda "#res","#cont"-"#label"("#err")\n" \
	".previous\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) extern void __get_user_unknown(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86)
/*
 * Fetch *ptr into (x) without an address check; evaluates to 0 on
 * success or a negative error set by the exception fixup on a fault.
 * The __gu_err / __gu_val names are fixed: the __get_user_NN() asm
 * helpers below refer to them directly.  An unsupported size calls
 * the undefined __get_user_unknown(), presumably to force a link
 * error — no definition is visible here (TODO confirm).
 */
#define __get_user_nocheck(x, ptr, size) \
({ \
	long __gu_err = 0; \
	unsigned long __gu_val; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __get_user_8(ptr); break; \
	case 2: __get_user_16(ptr); break; \
	case 4: __get_user_32(ptr); break; \
	case 8: __get_user_64(ptr); break; \
	default: __get_user_unknown(); break; \
	} \
	(x) = (__force __typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102)
/*
 * Checked fetch: defaults to -EFAULT with __gu_val pre-zeroed, so if
 * the range check fails (x) still receives a well-defined 0 and the
 * error is returned.  On a passing check the error is cleared and the
 * size-dispatched asm helper performs the (fixup-protected) load.
 */
#define __get_user_check(x, ptr, size) \
({ \
	long __gu_err = -EFAULT; \
	unsigned long __gu_val = 0; \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	if (__access_ok((unsigned long)__gu_addr, size)) { \
		__gu_err = 0; \
		switch (size) { \
		case 1: __get_user_8(__gu_addr); break; \
		case 2: __get_user_16(__gu_addr); break; \
		case 4: __get_user_32(__gu_addr); break; \
		case 8: __get_user_64(__gu_addr); break; \
		default: __get_user_unknown(); break; \
		} \
	} \
	(x) = (__force __typeof__(*(ptr))) __gu_val; \
	__gu_err; \
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)
/*
 * __m() presents the target as a large structure so a single "m"
 * constraint tells gcc a whole region may be read/written, keeping
 * its alias analysis honest about the asm access.
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)
/*
 * 64-bit load with exception fixup: on a fault at 1:, the handler
 * (via the EXC entry) resumes at 2: with an error code in __gu_err.
 * "1"(__gu_err) ties the error register's input to its output so the
 * incoming value survives the no-fault path.
 */
#define __get_user_64(addr)				\
	__asm__("1: ldq %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

/* 32-bit variant: ldl sign-extends the loaded longword into __gu_val. */
#define __get_user_32(addr)				\
	__asm__("1: ldl %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)
#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

/* 16-bit load via the BWX ldwu instruction, zero-extending. */
#define __get_user_16(addr)				\
	__asm__("1: ldwu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))

/* 8-bit load via the BWX ldbu instruction, zero-extending. */
#define __get_user_8(addr)				\
	__asm__("1: ldbu %0,%2\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "m"(__m(addr)), "1"(__gu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   load, so we have to do a general unaligned operation. */

/*
 * Pre-BWX 16-bit load: fetch the two quadwords that may each hold part
 * of a word straddling a quadword boundary (ldq_u ignores the low
 * address bits), extract the low/high pieces with extwl/extwh keyed on
 * the address, and OR them together.  Both loads carry fixups to 3:.
 */
#define __get_user_16(addr)						\
{									\
	long __gu_tmp;							\
	__asm__("1: ldq_u %0,0(%3)\n"					\
	"2:	ldq_u %1,1(%3)\n"					\
	"	extwl %0,%3,%0\n"					\
	"	extwh %1,%3,%1\n"					\
	"	or %0,%1,%0\n"						\
	"3:\n"								\
	EXC(1b,3b,%0,%2)						\
	EXC(2b,3b,%0,%2)						\
		: "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err)	\
		: "r"(addr), "2"(__gu_err));				\
}

/*
 * Pre-BWX 8-bit load: a byte never straddles a quadword, so one
 * unaligned load plus an extbl keyed on the address suffices.
 */
#define __get_user_8(addr)				\
	__asm__("1: ldq_u %0,0(%2)\n"			\
	"	extbl %0,%2,%0\n"			\
	"2:\n"						\
	EXC(1b,2b,%0,%1)				\
		: "=&r"(__gu_val), "=r"(__gu_err)	\
		: "r"(addr), "1"(__gu_err))
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) extern void __put_user_unknown(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
/*
 * Store (x) to *ptr without an address check; evaluates to 0 on
 * success or a negative error from the exception fixup.  __pu_err is
 * a fixed name referenced by the __put_user_NN() asm helpers below.
 * Unsupported sizes call the undefined __put_user_unknown(),
 * presumably to force a link error (no definition visible here).
 */
#define __put_user_nocheck(x, ptr, size) \
({ \
	long __pu_err = 0; \
	__chk_user_ptr(ptr); \
	switch (size) { \
	case 1: __put_user_8(x, ptr); break; \
	case 2: __put_user_16(x, ptr); break; \
	case 4: __put_user_32(x, ptr); break; \
	case 8: __put_user_64(x, ptr); break; \
	default: __put_user_unknown(); break; \
	} \
	__pu_err; \
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)
/*
 * Checked store: defaults to -EFAULT and performs no store at all if
 * the range check fails; otherwise clears the error and dispatches on
 * size to the fixup-protected store helpers.
 */
#define __put_user_check(x, ptr, size) \
({ \
	long __pu_err = -EFAULT; \
	__typeof__(*(ptr)) __user *__pu_addr = (ptr); \
	if (__access_ok((unsigned long)__pu_addr, size)) { \
		__pu_err = 0; \
		switch (size) { \
		case 1: __put_user_8(x, __pu_addr); break; \
		case 2: __put_user_16(x, __pu_addr); break; \
		case 4: __put_user_32(x, __pu_addr); break; \
		case 8: __put_user_64(x, __pu_addr); break; \
		default: __put_user_unknown(); break; \
		} \
	} \
	__pu_err; \
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) * The "__put_user_xx()" macros tell gcc they read from memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) * instead of writing: this is because they do not write to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) * any memory gcc knows about, so there are no aliasing issues
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) */
/*
 * 64-bit store with exception fixup.  The EXC "res" field is $31 (the
 * always-zero register) since a store has no value to patch up — only
 * the error register %0 matters.  "rJ" lets gcc substitute $31 itself
 * when (x) is the constant zero.
 */
#define __put_user_64(x, addr)					\
__asm__ __volatile__("1: stq %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m" (__m(addr)), "rJ" (x), "0"(__pu_err))

/* 32-bit variant of the above. */
#define __put_user_32(x, addr)					\
__asm__ __volatile__("1: stl %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
#ifdef __alpha_bwx__
/* Those lucky bastards with ev56 and later CPUs can do byte/word moves. */

/* 16-bit store via the BWX stw instruction. */
#define __put_user_16(x, addr)					\
__asm__ __volatile__("1: stw %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))

/* 8-bit store via the BWX stb instruction. */
#define __put_user_8(x, addr)					\
__asm__ __volatile__("1: stb %r2,%1\n"				\
	"2:\n"							\
	EXC(1b,2b,$31,%0)					\
		: "=r"(__pu_err)				\
		: "m"(__m(addr)), "rJ"(x), "0"(__pu_err))
#else
/* Unfortunately, we can't get an unaligned access trap for the sub-word
   write, so we have to do a general unaligned operation. */

/*
 * Pre-BWX 16-bit store: read-modify-write of the (up to) two
 * quadwords the word may straddle.  inswh/inswl position the value,
 * mskwh/mskwl clear the target bytes in the loaded quadwords, then
 * the merged results are stored back.  All four memory instructions
 * carry fixups to 5:; faults leave the error in %0 ($31 again means
 * "no value register to fix up").
 */
#define __put_user_16(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2, __pu_tmp3, __pu_tmp4;	\
	__asm__ __volatile__(					\
	"1:	ldq_u %2,1(%5)\n"				\
	"2:	ldq_u %1,0(%5)\n"				\
	"	inswh %6,%5,%4\n"				\
	"	inswl %6,%5,%3\n"				\
	"	mskwh %2,%5,%2\n"				\
	"	mskwl %1,%5,%1\n"				\
	"	or %2,%4,%2\n"					\
	"	or %1,%3,%1\n"					\
	"3:	stq_u %2,1(%5)\n"				\
	"4:	stq_u %1,0(%5)\n"				\
	"5:\n"							\
	EXC(1b,5b,$31,%0)					\
	EXC(2b,5b,$31,%0)					\
	EXC(3b,5b,$31,%0)					\
	EXC(4b,5b,$31,%0)					\
		: "=r"(__pu_err), "=&r"(__pu_tmp1),		\
		  "=&r"(__pu_tmp2), "=&r"(__pu_tmp3),		\
		  "=&r"(__pu_tmp4)				\
		: "r"(addr), "r"((unsigned long)(x)), "0"(__pu_err)); \
}

/*
 * Pre-BWX 8-bit store: a byte fits in one quadword, so a single
 * load / insert / mask / merge / store sequence suffices.
 */
#define __put_user_8(x, addr)					\
{								\
	long __pu_tmp1, __pu_tmp2;				\
	__asm__ __volatile__(					\
	"1:	ldq_u %1,0(%4)\n"				\
	"	insbl %3,%4,%2\n"				\
	"	mskbl %1,%4,%1\n"				\
	"	or %1,%2,%1\n"					\
	"2:	stq_u %1,0(%4)\n"				\
	"3:\n"							\
	EXC(1b,3b,$31,%0)					\
	EXC(2b,3b,$31,%0)					\
		: "=r"(__pu_err),				\
		  "=&r"(__pu_tmp1), "=&r"(__pu_tmp2)		\
		: "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \
}
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) * Complex access routines
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) extern long __copy_user(void *to, const void *from, long len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) static inline unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) raw_copy_from_user(void *to, const void __user *from, unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) return __copy_user(to, (__force const void *)from, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) static inline unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) raw_copy_to_user(void __user *to, const void *from, unsigned long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) return __copy_user((__force void *)to, from, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) extern long __clear_user(void __user *to, long len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) extern inline long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) clear_user(void __user *to, long len)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) if (__access_ok((unsigned long)to, len))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) len = __clear_user(to, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) return len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
/*
 * Largest address the current segment permits: unrestricted under
 * KERNEL_DS, the top of the user region (TASK_SIZE) otherwise.
 */
#define user_addr_max() \
	(uaccess_kernel() ? ~0UL : TASK_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) extern long strncpy_from_user(char *dest, const char __user *src, long count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) extern __must_check long strnlen_user(const char __user *str, long n);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) #include <asm/extable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) #endif /* __ALPHA_UACCESS_H */