^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) /* SPDX-License-Identifier: GPL-2.0-or-later */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /* Generic I/O port emulation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * Written by David Howells (dhowells@redhat.com)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) #ifndef __ASM_GENERIC_IO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) #define __ASM_GENERIC_IO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) #include <asm/page.h> /* I/O is all done through memory accesses */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) #include <linux/string.h> /* for memset() and memcpy() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) #include <linux/types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) #ifdef CONFIG_GENERIC_IOMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) #include <asm-generic/iomap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) #include <asm/mmiowb.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) #include <asm-generic/pci_iomap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) #ifndef __io_br
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) #define __io_br() barrier()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) /* prevent prefetching of coherent DMA data ahead of a dma-complete */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) #ifndef __io_ar
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) #ifdef rmb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28) #define __io_ar(v) rmb()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #define __io_ar(v) barrier()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) /* flush writes to coherent DMA data before possibly triggering a DMA read */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #ifndef __io_bw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #ifdef wmb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37) #define __io_bw() wmb()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) #define __io_bw() barrier()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) /* serialize device access against a spin_unlock, usually handled there. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) #ifndef __io_aw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) #define __io_aw() mmiowb_set_pending()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) #ifndef __io_pbw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) #define __io_pbw() __io_bw()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) #ifndef __io_paw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) #define __io_paw() __io_aw()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) #ifndef __io_pbr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57) #define __io_pbr() __io_br()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) #ifndef __io_par
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61) #define __io_par(v) __io_ar(v)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) * On some architectures memory mapped IO needs to be accessed differently.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69) * On the simple architectures, we just read/write the memory location
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) * directly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) #ifndef __raw_readb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) #define __raw_readb __raw_readb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) static inline u8 __raw_readb(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) return *(const volatile u8 __force *)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) #ifndef __raw_readw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) #define __raw_readw __raw_readw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) static inline u16 __raw_readw(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) return *(const volatile u16 __force *)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) #ifndef __raw_readl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90) #define __raw_readl __raw_readl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) static inline u32 __raw_readl(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) return *(const volatile u32 __force *)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) #ifndef __raw_readq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) #define __raw_readq __raw_readq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) static inline u64 __raw_readq(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return *(const volatile u64 __force *)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) #ifndef __raw_writeb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) #define __raw_writeb __raw_writeb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) *(volatile u8 __force *)addr = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) #ifndef __raw_writew
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) #define __raw_writew __raw_writew
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) static inline void __raw_writew(u16 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) *(volatile u16 __force *)addr = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) #ifndef __raw_writel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) #define __raw_writel __raw_writel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) static inline void __raw_writel(u32 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) *(volatile u32 __force *)addr = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) #ifndef __raw_writeq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) #define __raw_writeq __raw_writeq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) *(volatile u64 __force *)addr = value;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) * {read,write}{b,w,l,q}() access little endian memory and return result in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143) * native endianness.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) #ifndef readb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) #define readb readb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) static inline u8 readb(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) __io_br();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) val = __raw_readb(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) __io_ar(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) #ifndef readw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) #define readw readw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) static inline u16 readw(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) __io_br();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) __io_ar(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) #ifndef readl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) #define readl readl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) static inline u32 readl(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) __io_br();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) __io_ar(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #ifndef readq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) #define readq readq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) static inline u64 readq(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) u64 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) __io_br();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) val = __le64_to_cpu(__raw_readq(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194) __io_ar(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200) #ifndef writeb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) #define writeb writeb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) static inline void writeb(u8 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204) __io_bw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) __raw_writeb(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) __io_aw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) #ifndef writew
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) #define writew writew
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) static inline void writew(u16 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) __io_bw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) __raw_writew((u16 __force)cpu_to_le16(value), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) __io_aw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) #ifndef writel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) #define writel writel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) static inline void writel(u32 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) __io_bw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) __raw_writel((u32 __force)__cpu_to_le32(value), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) __io_aw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) #ifndef writeq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) #define writeq writeq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233) static inline void writeq(u64 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) __io_bw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) __raw_writeq(__cpu_to_le64(value), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) __io_aw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) * {read,write}{b,w,l,q}_relaxed() are like the regular version, but
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) * are not guaranteed to provide ordering against spinlocks or memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) * accesses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247) #ifndef readb_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) #define readb_relaxed readb_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) static inline u8 readb_relaxed(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251) return __raw_readb(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255) #ifndef readw_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) #define readw_relaxed readw_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) static inline u16 readw_relaxed(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) return __le16_to_cpu(__raw_readw(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) #ifndef readl_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) #define readl_relaxed readl_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) static inline u32 readl_relaxed(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) return __le32_to_cpu(__raw_readl(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) #if defined(readq) && !defined(readq_relaxed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) #define readq_relaxed readq_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) static inline u64 readq_relaxed(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) return __le64_to_cpu(__raw_readq(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) #ifndef writeb_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) #define writeb_relaxed writeb_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283) __raw_writeb(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287) #ifndef writew_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) #define writew_relaxed writew_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) __raw_writew(cpu_to_le16(value), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) #ifndef writel_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) #define writel_relaxed writel_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299) __raw_writel(__cpu_to_le32(value), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) #if defined(writeq) && !defined(writeq_relaxed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) #define writeq_relaxed writeq_relaxed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) __raw_writeq(__cpu_to_le64(value), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) #ifndef readsb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) #define readsb readsb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) static inline void readsb(const volatile void __iomem *addr, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) u8 *buf = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) u8 x = __raw_readb(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) *buf++ = x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) #ifndef readsw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) #define readsw readsw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) static inline void readsw(const volatile void __iomem *addr, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) u16 *buf = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) u16 x = __raw_readw(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) *buf++ = x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) #ifndef readsl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) #define readsl readsl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) static inline void readsl(const volatile void __iomem *addr, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) u32 *buf = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) u32 x = __raw_readl(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) *buf++ = x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) #ifndef readsq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) #define readsq readsq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) static inline void readsq(const volatile void __iomem *addr, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) u64 *buf = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) u64 x = __raw_readq(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) *buf++ = x;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) #ifndef writesb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) #define writesb writesb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) static inline void writesb(volatile void __iomem *addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) const u8 *buf = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) __raw_writeb(*buf++, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) #ifndef writesw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) #define writesw writesw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) static inline void writesw(volatile void __iomem *addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) const u16 *buf = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) __raw_writew(*buf++, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) #ifndef writesl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) #define writesl writesl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) static inline void writesl(volatile void __iomem *addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) const u32 *buf = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) __raw_writel(*buf++, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) #ifndef writesq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) #define writesq writesq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) static inline void writesq(volatile void __iomem *addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) if (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) const u64 *buf = buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) __raw_writeq(*buf++, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) } while (--count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443) #ifndef PCI_IOBASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) #define PCI_IOBASE ((void __iomem *)0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) #ifndef IO_SPACE_LIMIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) #define IO_SPACE_LIMIT 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * implemented on hardware that needs an additional delay for I/O accesses to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) * take effect.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) #if !defined(inb) && !defined(_inb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) #define _inb _inb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) static inline u8 _inb(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) u8 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) __io_pbr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) val = __raw_readb(PCI_IOBASE + addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) __io_par(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) #if !defined(inw) && !defined(_inw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) #define _inw _inw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) static inline u16 _inw(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474) u16 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) __io_pbr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) __io_par(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) #if !defined(inl) && !defined(_inl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) #define _inl _inl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) static inline u32 _inl(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) u32 val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) __io_pbr();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) __io_par(val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492) return val;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496) #if !defined(outb) && !defined(_outb)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) #define _outb _outb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) static inline void _outb(u8 value, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) __io_pbw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) __raw_writeb(value, PCI_IOBASE + addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) __io_paw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) #if !defined(outw) && !defined(_outw)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) #define _outw _outw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) static inline void _outw(u16 value, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) __io_pbw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) __raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) __io_paw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) #if !defined(outl) && !defined(_outl)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) #define _outl _outl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) static inline void _outl(u32 value, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520) __io_pbw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) __raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) __io_paw();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) #include <linux/logic_pio.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) #ifndef inb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) #define inb _inb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) #ifndef inw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) #define inw _inw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) #ifndef inl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) #define inl _inl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) #ifndef outb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) #define outb _outb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) #ifndef outw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) #define outw _outw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) #ifndef outl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) #define outl _outl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) #ifndef inb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) #define inb_p inb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) static inline u8 inb_p(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) return inb(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) #ifndef inw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) #define inw_p inw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) static inline u16 inw_p(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) return inw(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) #ifndef inl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) #define inl_p inl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) static inline u32 inl_p(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572) return inl(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) #ifndef outb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) #define outb_p outb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) static inline void outb_p(u8 value, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) outb(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) #ifndef outw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) #define outw_p outw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) static inline void outw_p(u16 value, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) outw(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) #ifndef outl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) #define outl_p outl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) static inline void outl_p(u32 value, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) outl(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602) * single I/O port multiple times.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) #ifndef insb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) #define insb insb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) static inline void insb(unsigned long addr, void *buffer, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) readsb(PCI_IOBASE + addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) #ifndef insw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) #define insw insw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) static inline void insw(unsigned long addr, void *buffer, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) readsw(PCI_IOBASE + addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) #ifndef insl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) #define insl insl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) static inline void insl(unsigned long addr, void *buffer, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) readsl(PCI_IOBASE + addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) #ifndef outsb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630) #define outsb outsb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) static inline void outsb(unsigned long addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634) writesb(PCI_IOBASE + addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) #ifndef outsw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) #define outsw outsw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) static inline void outsw(unsigned long addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) writesw(PCI_IOBASE + addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) #ifndef outsl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) #define outsl outsl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) static inline void outsl(unsigned long addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) writesl(PCI_IOBASE + addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) #ifndef insb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) #define insb_p insb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) insb(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) #ifndef insw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) #define insw_p insw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) insw(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) #ifndef insl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) #define insl_p insl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) insl(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) #ifndef outsb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) #define outsb_p outsb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) static inline void outsb_p(unsigned long addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) outsb(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) #ifndef outsw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) #define outsw_p outsw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) static inline void outsw_p(unsigned long addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) outsw(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) #ifndef outsl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) #define outsl_p outsl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) static inline void outsl_p(unsigned long addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) outsl(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) #ifndef CONFIG_GENERIC_IOMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) #ifndef ioread8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) #define ioread8 ioread8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) static inline u8 ioread8(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) return readb(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) #ifndef ioread16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) #define ioread16 ioread16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) static inline u16 ioread16(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) return readw(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) #ifndef ioread32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) #define ioread32 ioread32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) static inline u32 ioread32(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) return readl(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) #ifndef ioread64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) #define ioread64 ioread64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) static inline u64 ioread64(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) return readq(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) #ifndef iowrite8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) #define iowrite8 iowrite8
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) static inline void iowrite8(u8 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) writeb(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) #ifndef iowrite16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) #define iowrite16 iowrite16
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) static inline void iowrite16(u16 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754) writew(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) #ifndef iowrite32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) #define iowrite32 iowrite32
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) static inline void iowrite32(u32 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) writel(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) #ifndef iowrite64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) #define iowrite64 iowrite64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) static inline void iowrite64(u64 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 771) writeq(value, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 772) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) #ifndef ioread16be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) #define ioread16be ioread16be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) static inline u16 ioread16be(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) return swab16(readw(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) #ifndef ioread32be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) #define ioread32be ioread32be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) static inline u32 ioread32be(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) return swab32(readl(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) #ifndef ioread64be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) #define ioread64be ioread64be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) static inline u64 ioread64be(const volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) return swab64(readq(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) #ifndef iowrite16be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) #define iowrite16be iowrite16be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) static inline void iowrite16be(u16 value, void volatile __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) writew(swab16(value), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) #ifndef iowrite32be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) #define iowrite32be iowrite32be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) static inline void iowrite32be(u32 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) writel(swab32(value), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) #ifndef iowrite64be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) #define iowrite64be iowrite64be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) static inline void iowrite64be(u64 value, volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) writeq(swab64(value), addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) #ifndef ioread8_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) #define ioread8_rep ioread8_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) readsb(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) #ifndef ioread16_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) #define ioread16_rep ioread16_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) static inline void ioread16_rep(const volatile void __iomem *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) void *buffer, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) readsw(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) #ifndef ioread32_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) #define ioread32_rep ioread32_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) static inline void ioread32_rep(const volatile void __iomem *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) void *buffer, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) readsl(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) #ifndef ioread64_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) #define ioread64_rep ioread64_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) static inline void ioread64_rep(const volatile void __iomem *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) void *buffer, unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) readsq(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) #ifndef iowrite8_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) #define iowrite8_rep iowrite8_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) static inline void iowrite8_rep(volatile void __iomem *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) writesb(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) #ifndef iowrite16_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) #define iowrite16_rep iowrite16_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) static inline void iowrite16_rep(volatile void __iomem *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) writesw(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) #ifndef iowrite32_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) #define iowrite32_rep iowrite32_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) static inline void iowrite32_rep(volatile void __iomem *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) writesl(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) #ifdef CONFIG_64BIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) #ifndef iowrite64_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) #define iowrite64_rep iowrite64_rep
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) static inline void iowrite64_rep(volatile void __iomem *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) unsigned int count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) writesq(addr, buffer, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) #endif /* CONFIG_64BIT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) #endif /* CONFIG_GENERIC_IOMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) #ifdef __KERNEL__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) #define __io_virt(x) ((void __force *)(x))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) * Change virtual addresses to physical addresses and vv.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) * These are pretty trivial
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) #ifndef virt_to_phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) #define virt_to_phys virt_to_phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) static inline unsigned long virt_to_phys(volatile void *address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) return __pa((unsigned long)address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) #ifndef phys_to_virt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) #define phys_to_virt phys_to_virt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static inline void *phys_to_virt(unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) return __va(address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) * DOC: ioremap() and ioremap_*() variants
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937) * Architectures with an MMU are expected to provide ioremap() and iounmap()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) * a default nop-op implementation that expect that the physical address used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940) * for MMIO are already marked as uncached, and can be used as kernel virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) * addresses.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) * for specific drivers if the architecture choses to implement them. If they
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) * are not implemented we fall back to plain ioremap.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) #ifndef CONFIG_MMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) #ifndef ioremap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) #define ioremap ioremap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) return (void __iomem *)(unsigned long)offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) #ifndef iounmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) #define iounmap iounmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) static inline void iounmap(void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) #elif defined(CONFIG_GENERIC_IOREMAP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) #include <linux/pgtable.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) void iounmap(volatile void __iomem *addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) /* _PAGE_IOREMAP needs to be supplied by the architecture */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) return ioremap_prot(addr, size, _PAGE_IOREMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) #endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) #ifndef ioremap_wc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) #define ioremap_wc ioremap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) #ifndef ioremap_wt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) #define ioremap_wt ioremap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) * ioremap_uc is special in that we do require an explicit architecture
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) * implementation. In general you do not want to use this function in a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) * driver and use plain ioremap, which is uncached by default. Similarly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) * architectures should not implement it unless they have a very good
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) * reason.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) #ifndef ioremap_uc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) #define ioremap_uc ioremap_uc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) #ifdef CONFIG_HAS_IOPORT_MAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) #ifndef CONFIG_GENERIC_IOMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) #ifndef ioport_map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) #define ioport_map ioport_map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) port &= IO_SPACE_LIMIT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) #define __pci_ioport_unmap __pci_ioport_unmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) static inline void __pci_ioport_unmap(void __iomem *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) uintptr_t start = (uintptr_t) PCI_IOBASE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) uintptr_t addr = (uintptr_t) p;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) if (addr >= start && addr < start + IO_SPACE_LIMIT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) iounmap(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) #ifndef ioport_unmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) #define ioport_unmap ioport_unmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) static inline void ioport_unmap(void __iomem *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) #else /* CONFIG_GENERIC_IOMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) extern void ioport_unmap(void __iomem *p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) #endif /* CONFIG_GENERIC_IOMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) #endif /* CONFIG_HAS_IOPORT_MAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) #ifndef CONFIG_GENERIC_IOMAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) struct pci_dev;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) #ifndef __pci_ioport_unmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) static inline void __pci_ioport_unmap(void __iomem *p) {}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) #ifndef pci_iounmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) #define pci_iounmap pci_iounmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) __pci_ioport_unmap(p);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) #endif /* CONFIG_GENERIC_IOMAP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) * Convert a virtual cached pointer to an uncached pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) #ifndef xlate_dev_kmem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) #define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) static inline void *xlate_dev_kmem_ptr(void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) #ifndef xlate_dev_mem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) #define xlate_dev_mem_ptr xlate_dev_mem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) return __va(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) #ifndef unxlate_dev_mem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) #ifdef CONFIG_VIRT_TO_BUS
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) #ifndef virt_to_bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) static inline unsigned long virt_to_bus(void *address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) return (unsigned long)address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) static inline void *bus_to_virt(unsigned long address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) return (void *)address;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) #ifndef memset_io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) #define memset_io memset_io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) * memset_io Set a range of I/O memory to a constant value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) * @addr: The beginning of the I/O-memory range to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * @val: The value to set the memory to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * @count: The number of bytes to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * Set a range of I/O memory to a given value.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) static inline void memset_io(volatile void __iomem *addr, int value,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) memset(__io_virt(addr), value, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) #ifndef memcpy_fromio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) #define memcpy_fromio memcpy_fromio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) * memcpy_fromio Copy a block of data from I/O memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) * @dst: The (RAM) destination for the copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) * @src: The (I/O memory) source for the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) * @count: The number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) * Copy a block of data from I/O memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) static inline void memcpy_fromio(void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) const volatile void __iomem *addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) memcpy(buffer, __io_virt(addr), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) #ifndef memcpy_toio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) #define memcpy_toio memcpy_toio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) * memcpy_toio Copy a block of data into I/O memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) * @dst: The (I/O memory) destination for the copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) * @src: The (RAM) source for the data
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) * @count: The number of bytes to copy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * Copy a block of data to I/O memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) memcpy(__io_virt(addr), buffer, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) #endif /* __KERNEL__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) #endif /* __ASM_GENERIC_IO_H */