/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2011 Texas Instruments Incorporated
 * Author: Mark Salter <msalter@redhat.com>
 */
#ifndef _ASM_C6X_UACCESS_H
#define _ASM_C6X_UACCESS_H

#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/string.h>

/*
 * C6X supports unaligned 32 and 64 bit loads and stores (the ldnw/stnw
 * and ldndw/stndw instructions), so the constant-size fast paths below
 * can move user data with a single non-aligned load/store pair.
 */
static inline __must_check unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	u32 tmp32;
	u64 tmp64;

	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1:
			*(u8 *)to = *(u8 __force *)from;
			return 0;
		case 4:
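			/*
			 * ldnw loads a 32-bit word from a possibly
			 * unaligned user address; "nop 4" covers the
			 * four load delay slots before stnw writes the
			 * word back out.  The 8-byte case below is the
			 * same pattern with the double-word forms.
			 */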
			asm volatile ("ldnw .d1t1 *%2,%0\n"
				      "nop 4\n"
				      "stnw .d1t1 %0,*%1\n"
				      : "=&a"(tmp32)
				      : "a"(to), "a"(from)
				      : "memory");
			return 0;
		case 8:
			asm volatile ("ldndw .d1t1 *%2,%0\n"
				      "nop 4\n"
				      "stndw .d1t1 %0,*%1\n"
				      : "=&a"(tmp64)
				      : "a"(to), "a"(from)
				      : "memory");
			return 0;
		default:
			break;
		}
	}

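	/* Non-constant or other sizes: fall back to a plain memcpy. */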
	memcpy(to, (const void __force *)from, n);
	return 0;
}
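
/*
 * Usage sketch (illustrative only, not part of this header): callers
 * should not use raw_copy_from_user() directly.  The generic
 * copy_from_user() from linux/uaccess.h performs the access_ok() range
 * check first and then falls through to the raw helper above; a nonzero
 * return means a short copy and is normally reported as -EFAULT.  With
 * hypothetical names:
 *
 *	struct foo_args args;
 *
 *	if (copy_from_user(&args, uptr, sizeof(args)))
 *		return -EFAULT;
 */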

static inline __must_check unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	u32 tmp32;
	u64 tmp64;

	if (__builtin_constant_p(n)) {
		switch (n) {
		case 1:
			*(u8 __force *)to = *(u8 *)from;
			return 0;
		case 4:
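			/*
			 * Same non-aligned load/store pattern as in
			 * raw_copy_from_user(), with the user pointer
			 * now on the store side.
			 */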
			asm volatile ("ldnw .d1t1 *%2,%0\n"
				      "nop 4\n"
				      "stnw .d1t1 %0,*%1\n"
				      : "=&a"(tmp32)
				      : "a"(to), "a"(from)
				      : "memory");
			return 0;
		case 8:
			asm volatile ("ldndw .d1t1 *%2,%0\n"
				      "nop 4\n"
				      "stndw .d1t1 %0,*%1\n"
				      : "=&a"(tmp64)
				      : "a"(to), "a"(from)
				      : "memory");
			return 0;
		default:
			break;
		}
	}

	memcpy((void __force *)to, from, n);
	return 0;
}

#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER
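
/*
 * Defining INLINE_COPY_{FROM,TO}_USER makes linux/uaccess.h provide
 * copy_from_user()/copy_to_user() as static inlines built on the raw
 * helpers above instead of out-of-line library calls.  Sketch of the
 * write-back direction (names are hypothetical):
 *
 *	if (copy_to_user(uptr, &result, sizeof(result)))
 *		return -EFAULT;
 */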

extern int _access_ok(unsigned long addr, unsigned long size);
#ifdef CONFIG_ACCESS_CHECK
#define __access_ok _access_ok
#endif
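
/*
 * With CONFIG_ACCESS_CHECK enabled, the access_ok() check from
 * asm-generic/uaccess.h is routed through _access_ok() above; without
 * it the generic stub accepts any range.  A minimal sketch of a checked
 * read (uptr is a hypothetical user pointer):
 *
 *	u32 val;
 *
 *	if (!access_ok(uptr, sizeof(val)))
 *		return -EFAULT;
 *	if (__get_user(val, (u32 __user *)uptr))
 *		return -EFAULT;
 */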

#include <asm-generic/uaccess.h>

#endif /* _ASM_C6X_UACCESS_H */