^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-only */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) #ifndef _ASM_ARC_IO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #define _ASM_ARC_IO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/byteorder.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <asm/unaligned.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
#ifdef CONFIG_ISA_ARCV2
#include <asm/barrier.h>
/* ARCv2: MMIO accessors pair raw accesses with real read/write barriers */
#define __iormb()		rmb()
#define __iowmb()		wmb()
#else
/* Other ARC ISAs: no barrier needed, accessors compile to the raw access */
#define __iormb()		do { } while (0)
#define __iowmb()		do { } while (0)
#endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22)
/* MMIO mapping primitives; implemented elsewhere in arch code */
extern void __iomem *ioremap(phys_addr_t paddr, unsigned long size);
extern void __iomem *ioremap_prot(phys_addr_t paddr, unsigned long size,
				  unsigned long flags);

/*
 * Map an I/O "port" range for use with ioread*/iowrite*().
 * The port number is used directly as the MMIO address (no separate
 * port address space here), so mapping is a plain cast and @nr is unused.
 */
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	return (void __iomem *)port;
}

/* Nothing to undo: ioport_map() created no mapping */
static inline void ioport_unmap(void __iomem *addr)
{
}

extern void iounmap(const void __iomem *addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36)
/*
 * io{read,write}{16,32}be() macros
 *
 * Reads: raw access, swap big-endian -> CPU order, then read barrier.
 * Writes: write barrier first, then swap CPU order -> big-endian and store.
 */
#define ioread16be(p)		({ u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
#define ioread32be(p)		({ u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })

#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force u32)cpu_to_be32(v), p); })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45)
/* Change struct page to physical address */
#define page_to_phys(page)		(page_to_pfn(page) << PAGE_SHIFT)

#define __raw_readb __raw_readb
/*
 * 8-bit raw MMIO read: a single ldb instruction.
 * The "m" constraint lets the compiler form the address; %U1 emits the
 * matching addressing-mode suffix (ARC-specific operand modifier).
 * The "memory" clobber stops the compiler reordering/caching around the
 * access; no hardware barrier is implied (callers add __iormb() if needed).
 */
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 b;

	__asm__ __volatile__(
	"	ldb%U1 %0, %1	\n"
	: "=r" (b)
	: "m" (*(volatile u8 __force *)addr)
	: "memory");

	return b;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62)
#define __raw_readw __raw_readw
/*
 * 16-bit raw MMIO read (ldw), native endianness, no barrier.
 * Same constraint scheme as __raw_readb(); "memory" clobber only
 * prevents compiler reordering.
 */
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 s;

	__asm__ __volatile__(
	"	ldw%U1 %0, %1	\n"
	: "=r" (s)
	: "m" (*(volatile u16 __force *)addr)
	: "memory");

	return s;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76)
#define __raw_readl __raw_readl
/*
 * 32-bit raw MMIO read (ld), native endianness, no barrier.
 * Same constraint scheme as __raw_readb(); "memory" clobber only
 * prevents compiler reordering.
 */
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 w;

	__asm__ __volatile__(
	"	ld%U1 %0, %1	\n"
	: "=r" (w)
	: "m" (*(volatile u32 __force *)addr)
	: "memory");

	return w;
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
/*
 * {read,write}s{b,w,l}() repeatedly access the same IO address in
 * native endianness in 8-, 16-, 32-bit chunks {into,from} memory,
 * @count times
 *
 * Template parameters: @t is the access width in bits (8/16/32),
 * @f is the accessor suffix (b/w/l).  @count == 0 is a no-op.
 * If the memory buffer @ptr is not naturally aligned for the access
 * width, each element is stored via put_unaligned() instead of a
 * direct store, since some ARC CPUs don't support unaligned accesses.
 */
#define __raw_readsx(t,f) \
static inline void __raw_reads##f(const volatile void __iomem *addr,	\
				  void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPU's don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			u##t x = __raw_read##f(addr);			\
			*buf++ = x;					\
		} while (--count);					\
	} else {							\
		do {							\
			u##t x = __raw_read##f(addr);			\
			put_unaligned(x, buf++);			\
		} while (--count);					\
	}								\
}

/* Instantiate the 8/16/32-bit string-read accessors */
#define __raw_readsb __raw_readsb
__raw_readsx(8, b)
#define __raw_readsw __raw_readsw
__raw_readsx(16, w)
#define __raw_readsl __raw_readsl
__raw_readsx(32, l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
#define __raw_writeb __raw_writeb
/*
 * 8-bit raw MMIO write (stb), no barrier (callers add __iowmb() if needed).
 * "memory" clobber only prevents compiler reordering around the store.
 */
static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stb%U1 %0, %1	\n"
	:
	: "r" (b), "m" (*(volatile u8 __force *)addr)
	: "memory");
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)
#define __raw_writew __raw_writew
/* 16-bit raw MMIO write (stw), native endianness, no barrier */
static inline void __raw_writew(u16 s, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	stw%U1 %0, %1	\n"
	:
	: "r" (s), "m" (*(volatile u16 __force *)addr)
	: "memory");

}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147)
#define __raw_writel __raw_writel
/* 32-bit raw MMIO write (st), native endianness, no barrier */
static inline void __raw_writel(u32 w, volatile void __iomem *addr)
{
	__asm__ __volatile__(
	"	st%U1 %0, %1	\n"
	:
	: "r" (w), "m" (*(volatile u32 __force *)addr)
	: "memory");

}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
/*
 * Mirror of __raw_readsx(): write @count elements from memory buffer
 * @ptr to the same IO address @addr, in @t-bit chunks.  Unaligned
 * buffers are read with get_unaligned() since some ARC CPUs don't
 * support unaligned accesses.  @count == 0 is a no-op.  No barriers.
 */
#define __raw_writesx(t,f)						\
static inline void __raw_writes##f(volatile void __iomem *addr, 	\
				   const void *ptr, unsigned int count)	\
{									\
	bool is_aligned = ((unsigned long)ptr % ((t) / 8)) == 0;	\
	const u##t *buf = ptr;						\
									\
	if (!count)							\
		return;							\
									\
	/* Some ARC CPU's don't support unaligned accesses */		\
	if (is_aligned) {						\
		do {							\
			__raw_write##f(*buf++, addr);			\
		} while (--count);					\
	} else {							\
		do {							\
			__raw_write##f(get_unaligned(buf++), addr);	\
		} while (--count);					\
	}								\
}

/* Instantiate the 8/16/32-bit string-write accessors */
#define __raw_writesb __raw_writesb
__raw_writesx(8, b)
#define __raw_writesw __raw_writesw
__raw_writesx(16, w)
#define __raw_writesl __raw_writesl
__raw_writesx(32, l)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187)
/*
 * MMIO can also get buffered/optimized in micro-arch, so barriers needed
 * Based on ARM model for the typical use case
 *
 *	<ST [DMA buffer]>
 *	<writel MMIO "go" reg>
 *  or:
 *	<readl MMIO "status" reg>
 *	<LD [DMA buffer]>
 *
 * http://lkml.kernel.org/r/20150622133656.GG1583@arm.com
 */
/* Ordered reads: relaxed (LE-converted) access followed by a read barrier */
#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(); __v; })
/* String reads: raw repeated access, one barrier after the whole transfer */
#define readsb(p,d,l)		({ __raw_readsb(p,d,l); __iormb(); })
#define readsw(p,d,l)		({ __raw_readsw(p,d,l); __iormb(); })
#define readsl(p,d,l)		({ __raw_readsl(p,d,l); __iormb(); })

/* Ordered writes: write barrier first, then the relaxed access */
#define writeb(v,c)		({ __iowmb(); writeb_relaxed(v,c); })
#define writew(v,c)		({ __iowmb(); writew_relaxed(v,c); })
#define writel(v,c)		({ __iowmb(); writel_relaxed(v,c); })
/* String writes: one barrier before the whole transfer */
#define writesb(p,d,l)		({ __iowmb(); __raw_writesb(p,d,l); })
#define writesw(p,d,l)		({ __iowmb(); __raw_writesw(p,d,l); })
#define writesl(p,d,l)		({ __iowmb(); __raw_writesl(p,d,l); })
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213)
/*
 * Relaxed API for drivers which can handle barrier ordering themselves
 *
 * Also these are defined to perform little endian accesses.
 * To provide the typical device register semantics of fixed endian,
 * swap the byte order for Big Endian
 * (le*_to_cpu/cpu_to_le* are no-ops on a little-endian kernel)
 *
 * http://lkml.kernel.org/r/201603100845.30602.arnd@arndb.de
 */
#define readb_relaxed(c)	__raw_readb(c)
#define readw_relaxed(c) ({ u16 __r = le16_to_cpu((__force __le16) \
					__raw_readw(c)); __r; })
#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
					__raw_readl(c)); __r; })

#define writeb_relaxed(v,c)	__raw_writeb(v,c)
#define writew_relaxed(v,c)	__raw_writew((__force u16) cpu_to_le16(v),c)
#define writel_relaxed(v,c)	__raw_writel((__force u32) cpu_to_le32(v),c)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) #include <asm-generic/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) #endif /* _ASM_ARC_IO_H */