^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) #ifndef __PARISC_UACCESS_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) #define __PARISC_UACCESS_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * User space memory access functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <asm/cache.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/bug.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
/*
 * Address-limit "segments": a one-field mm_segment_t where .seg == 0
 * selects kernel space and .seg == 1 selects user space.  load_sr2()
 * below turns this value into the %sr2 space register used by the
 * access macros in this file.
 */
#define KERNEL_DS	((mm_segment_t){0})
#define USER_DS	((mm_segment_t){1})

/* True iff the current task's address limit selects kernel space. */
#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)

/* The address limit is stored per-thread in thread_info. */
#define get_fs()	(current_thread_info()->addr_limit)
#define set_fs(x)	(current_thread_info()->addr_limit = (x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21)
/*
 * Note that since kernel addresses are in a separate address space on
 * parisc, we don't need to do anything for access_ok().
 * We just let the page fault handler do the right thing. This also means
 * that put_user is the same as __put_user, etc.
 *
 * "(uaddr) == (uaddr)" is a constant-true expression that still mentions
 * the argument, so callers get no unused-variable warnings; real faults
 * are handled later via the exception table entries emitted below.
 */

#define access_ok(uaddr, size)	\
	( (uaddr) == (uaddr) )

/* With access_ok() a no-op, checked and unchecked variants coincide. */
#define put_user __put_user
#define get_user __get_user
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34)
/*
 * 64-bit user loads/stores.  A 64-bit kernel uses the native ldd/std
 * instructions; a 32-bit kernel must split the access into two 32-bit
 * words (the *_asm64 helpers below).
 */
#ifdef CONFIG_64BIT
#define LDD_USER(val, ptr)	__get_user_asm(val, "ldd", ptr)
#define STD_USER(x, ptr)	__put_user_asm("std", x, ptr)
#else
#define LDD_USER(val, ptr)	__get_user_asm64(val, ptr)
#define STD_USER(x, ptr)	__put_user_asm64(x, ptr)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
/*
 * The exception table contains two values: the first is the relative offset to
 * the address of the instruction that is allowed to fault, and the second is
 * the relative offset to the address of the fixup routine. Since relative
 * addresses are used, 32bit values are sufficient even on 64bit kernel.
 * Entries are consumed by fixup_exception() (declared at the bottom of
 * this header).
 */

#define ARCH_HAS_RELATIVE_EXTABLE
struct exception_table_entry {
	int insn;	/* relative address of insn that is allowed to fault. */
	int fixup;	/* relative address of fixup routine */
};
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
/*
 * Emit one exception table entry from inline assembly.  Both words are
 * stored relative to their own location (" - ."), matching the relative
 * layout of struct exception_table_entry above.
 */
#define ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr )\
	".section __ex_table,\"aw\"\n" \
	".word (" #fault_addr " - .), (" #except_addr " - .)\n\t" \
	".previous\n"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60)
/*
 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
 * (with lowest bit set) for which the fault handler in fixup_exception() will
 * load -EFAULT into %r8 for a read or write fault, and zeroes the target
 * register in case of a read fault in get_user().
 * The "+ 1" below is what sets that lowest bit in the stored fixup offset.
 */
#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
/*
 * load_sr2() preloads the space register %%sr2 - based on the value of
 * get_fs() - with either a value of 0 to access kernel space (KERNEL_DS which
 * is 0), or with the current value of %%sr3 to access user space (USER_DS)
 * memory. The following __get_user_asm() and __put_user_asm() functions have
 * %%sr2 hard-coded to access the requested memory.
 *
 * How: "or,=" nullifies the following mfsp when the get_fs() operand is
 * zero (KERNEL_DS), so %0 stays 0 and mtsp clears %sr2; otherwise the
 * mfsp copies %sr3 into %0 and mtsp installs it in %sr2.
 */
#define load_sr2() \
	__asm__(" or,= %0,%%r0,%%r0\n\t"	\
		" mfsp %%sr3,%0\n\t"		\
		" mtsp %0,%%sr2\n\t"		\
		: : "r"(get_fs()) : )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82)
/*
 * Size-dispatched user read.  The error code lives in %r8 because the
 * fault fixup (see ASM_EXCEPTIONTABLE_ENTRY_EFAULT above) writes -EFAULT
 * into that register; it starts at 0 (success).  The statement
 * expression evaluates to the error code; the loaded value lands in (val).
 */
#define __get_user_internal(val, ptr)			\
({							\
	register long __gu_err __asm__ ("r8") = 0;	\
							\
	switch (sizeof(*(ptr))) {			\
	case 1: __get_user_asm(val, "ldb", ptr); break;	\
	case 2: __get_user_asm(val, "ldh", ptr); break;	\
	case 4: __get_user_asm(val, "ldw", ptr); break;	\
	case 8: LDD_USER(val, ptr); break;		\
	default: BUILD_BUG();				\
	}						\
							\
	__gu_err;					\
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97)
/*
 * __get_user(): point %sr2 at the space selected by get_fs(), then do
 * the size-appropriate load.  Evaluates to 0 on success or -EFAULT.
 */
#define __get_user(val, ptr)			\
({						\
	load_sr2();				\
	__get_user_internal(val, ptr);		\
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103)
/*
 * Single-instruction user load through %sr2.  Label "1:" is the insn
 * allowed to fault, "9:" the fixup landing point; the EFAULT extable
 * entry makes a fault set __gu_err (%r8) to -EFAULT and zero the
 * destination register.  Expects __gu_err in scope (see
 * __get_user_internal above).
 */
#define __get_user_asm(val, ldx, ptr)			\
{							\
	register long __gu_val;				\
							\
	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__gu_val), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = (__force __typeof__(*(ptr))) __gu_val;	\
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116)
#if !defined(CONFIG_64BIT)

/*
 * 64-bit user read on a 32-bit kernel: two ldw's into a register pair
 * (%0 high word, %R0 low word), funnelled through a union so (val)
 * receives the caller's type.  "copy %%r0,%R0" pre-clears the low word
 * before the loads; NOTE(review): presumably so a fault on the first
 * ldw cannot leave stale data in the second half - confirm against
 * fixup_exception(), which zeroes the faulting target register.
 */
#define __get_user_asm64(val, ptr)			\
{							\
	union {						\
		unsigned long long	l;		\
		__typeof__(*(ptr))	t;		\
	} __gu_tmp;					\
							\
	__asm__(" copy %%r0,%R0\n"			\
		"1: ldw 0(%%sr2,%2),%0\n"		\
		"2: ldw 4(%%sr2,%2),%R0\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=&r"(__gu_tmp.l), "=r"(__gu_err)	\
		: "r"(ptr), "1"(__gu_err));		\
							\
	(val) = __gu_tmp.t;				\
}

#endif /* !defined(CONFIG_64BIT) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
/*
 * Size-dispatched user write.  __x converts (x) to the pointee type
 * before the store.  The error code travels in %r8, which the fault
 * fixup sets to -EFAULT on a faulting store; the statement expression
 * evaluates to that error code (0 on success).
 */
#define __put_user_internal(x, ptr)				\
({								\
	register long __pu_err __asm__ ("r8") = 0;		\
	__typeof__(*(ptr)) __x = (__typeof__(*(ptr)))(x);	\
								\
	switch (sizeof(*(ptr))) {				\
	case 1: __put_user_asm("stb", __x, ptr); break;		\
	case 2: __put_user_asm("sth", __x, ptr); break;		\
	case 4: __put_user_asm("stw", __x, ptr); break;		\
	case 8: STD_USER(__x, ptr); break;			\
	default: BUILD_BUG();					\
	}							\
								\
	__pu_err;						\
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)
/*
 * __put_user(): point %sr2 at the space selected by get_fs(), then do
 * the size-appropriate store.  Evaluates to 0 on success or -EFAULT.
 */
#define __put_user(x, ptr)			\
({						\
	load_sr2();				\
	__put_user_internal(x, ptr);		\
})
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)
/*
 * The "__put_user/kernel_asm()" macros tell gcc they read from memory
 * instead of writing. This is because they do not write to any memory
 * gcc knows about, so there are no aliasing issues. These macros must
 * also be aware that fixups are executed in the context of the fault,
 * and any registers used there must be listed as clobbers.
 * r8 is already listed as err.
 *
 * __volatile__ keeps the compiler from eliding the store, which has no
 * output the compiler can see.  Expects __pu_err in scope (see
 * __put_user_internal above).
 */

#define __put_user_asm(stx, x, ptr)			\
	__asm__ __volatile__ (				\
		"1: " stx " %2,0(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(x), "0"(__pu_err))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)
#if !defined(CONFIG_64BIT)

/*
 * 64-bit user store on a 32-bit kernel: two stw's for the high (%2)
 * and low (%R2) halves of the register pair.  Either store may fault;
 * both get EFAULT exception table entries landing at label "9:".
 */
#define __put_user_asm64(__val, ptr) do {		\
	__asm__ __volatile__ (				\
		"1: stw %2,0(%%sr2,%1)\n"		\
		"2: stw %R2,4(%%sr2,%1)\n"		\
		"9:\n"					\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
		: "=r"(__pu_err)			\
		: "r"(ptr), "r"(__val), "0"(__pu_err));	\
} while (0)

#endif /* !defined(CONFIG_64BIT) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)
/*
 * Complex access routines -- external declarations
 * (bodies live outside this header; the "l" prefix marks the
 * parisc-local implementations)
 */

extern long strncpy_from_user(char *, const char __user *, long);
extern unsigned lclear_user(void __user *, unsigned long);
extern long lstrnlen_user(const char __user *, long);
/*
 * Complex access routines -- macros
 */
/* No address-limit checking (see access_ok() above), so the whole
 * address range is nominally valid. */
#define user_addr_max() (~0UL)

/* Map the generic names onto the parisc "l"-prefixed implementations. */
#define strnlen_user lstrnlen_user
#define clear_user lclear_user
#define __clear_user lclear_user

/*
 * Bulk copy primitives.  NOTE(review): by the usual raw_copy_* contract
 * these return the number of bytes NOT copied (0 == full success) -
 * confirm against the out-of-header definitions.
 */
unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src,
					    unsigned long len);
unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src,
					    unsigned long len);
unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src,
					    unsigned long len);
#define INLINE_COPY_TO_USER
#define INLINE_COPY_FROM_USER

struct pt_regs;
/* Consumes the relative exception table entries emitted above; returns
 * nonzero when a fixup was applied. NOTE(review): return semantics not
 * visible in this header - confirm at the definition. */
int fixup_exception(struct pt_regs *regs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) #endif /* __PARISC_UACCESS_H */