/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Port to the Texas Instruments TMS320C6x architecture
 *
 * Copyright (C) 2004, 2009, 2010 Texas Instruments Incorporated
 * Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
 * Rewritten for 2.6.3x: Mark Salter <msalter@redhat.com>
 */
#ifndef _ASM_C6X_UNALIGNED_H
#define _ASM_C6X_UNALIGNED_H

#include <linux/types.h>	/* u16/u32/u64 */
#include <linux/swab.h>
#include <linux/unaligned/generic.h>

/*
 * The C64x+ can do unaligned word and dword accesses in hardware
 * using the non-aligned load/store instructions (LDNW/STNW and
 * LDNDW/STNDW).  There is no halfword equivalent, so the 16-bit
 * helpers below assemble their accesses from individual bytes.
 */

static inline u16 get_unaligned_le16(const void *p)
{
	const u8 *_p = p;

	return _p[0] | _p[1] << 8;
}

static inline u16 get_unaligned_be16(const void *p)
{
	const u8 *_p = p;

	return _p[0] << 8 | _p[1];
}

static inline void put_unaligned_le16(u16 val, void *p)
{
	u8 *_p = p;

	_p[0] = val;
	_p[1] = val >> 8;
}

static inline void put_unaligned_be16(u16 val, void *p)
{
	u8 *_p = p;

	_p[0] = val >> 8;
	_p[1] = val;
}
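
/*
 * The 32/64-bit accessors below use the non-aligned instructions
 * directly.  A summary of the asm conventions in the C6x GCC port:
 * ".d1t1"/".d2t1" select the D functional unit and the data path
 * moving data to or from the A register file; the "a" and "b"
 * operand constraints place values in the A and B register files.
 * Loads have four delay slots, hence the "nop 4" before a loaded
 * value may be used.
 */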
static inline u32 get_unaligned32(const void *p)
{
	/*
	 * The pointer doubles as the result register: LDNW loads the
	 * non-aligned word at *val back into val.
	 */
	u32 val = (u32) p;

	asm (" ldnw .d1t1 *%0,%0\n"
	     " nop 4\n"
	     : "+a"(val));
	return val;
}

static inline void put_unaligned32(u32 val, void *p)
{
	/* The "memory" clobber orders the store against other accesses. */
	asm volatile (" stnw .d2t1 %0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

static inline u64 get_unaligned64(const void *p)
{
	u64 val;

	/* LDNDW loads a non-aligned dword into a 64-bit register pair. */
	asm volatile (" ldndw .d1t1 *%1,%0\n"
		      " nop 4\n"
		      : "=a"(val) : "a"(p));
	return val;
}

static inline void put_unaligned64(u64 val, void *p)
{
	/* STNDW stores a non-aligned dword from a register pair. */
	asm volatile (" stndw .d2t1 %0,*%1\n"
		      : : "a"(val), "b"(p) : "memory");
}

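/*
 * The endianness-explicit 32/64-bit accessors map onto the
 * native-order primitives above, byte-swapping whenever the
 * requested byte order differs from the CPU's.
 */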
#ifdef CONFIG_CPU_BIG_ENDIAN

#define get_unaligned_le32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_le64(p)	 __swab64(get_unaligned64(p))
#define get_unaligned_be32(p)	 get_unaligned32(p)
#define get_unaligned_be64(p)	 get_unaligned64(p)
#define put_unaligned_le32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_le64(v, p) put_unaligned64(__swab64(v), (p))
#define put_unaligned_be32(v, p) put_unaligned32((v), (p))
#define put_unaligned_be64(v, p) put_unaligned64((v), (p))
#define get_unaligned		 __get_unaligned_be
#define put_unaligned		 __put_unaligned_be

#else

#define get_unaligned_le32(p)	 get_unaligned32(p)
#define get_unaligned_le64(p)	 get_unaligned64(p)
#define get_unaligned_be32(p)	 __swab32(get_unaligned32(p))
#define get_unaligned_be64(p)	 __swab64(get_unaligned64(p))
#define put_unaligned_le32(v, p) put_unaligned32((v), (p))
#define put_unaligned_le64(v, p) put_unaligned64((v), (p))
#define put_unaligned_be32(v, p) put_unaligned32(__swab32(v), (p))
#define put_unaligned_be64(v, p) put_unaligned64(__swab64(v), (p))
#define get_unaligned		 __get_unaligned_le
#define put_unaligned		 __put_unaligned_le

#endif /* CONFIG_CPU_BIG_ENDIAN */

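/*
 * Usage sketch (hypothetical buffer, for illustration only):
 *
 *	u8 buf[8];
 *	put_unaligned_le32(0x12345678, &buf[1]);
 *	v = get_unaligned_le32(&buf[1]);	// v == 0x12345678
 */
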
#endif /* _ASM_C6X_UNALIGNED_H */