/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/io.h
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_IO_H
#define __ASM_IO_H

#include <linux/types.h>
#include <linux/log_mmiorw.h>
#include <linux/pgtable.h>

#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <asm/memory.h>
#include <asm/early_ioremap.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>

/*
 * Generic IO read/write. These perform native-endian accesses.
 */
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
{
	log_write_mmio(val, 8, addr);
	asm volatile("strb %w0, [%1]" : : "rZ" (val), "r" (addr));
}

#define __raw_writew __raw_writew
static inline void __raw_writew(u16 val, volatile void __iomem *addr)
{
	log_write_mmio(val, 16, addr);
	asm volatile("strh %w0, [%1]" : : "rZ" (val), "r" (addr));
}

#define __raw_writel __raw_writel
static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
	log_write_mmio(val, 32, addr);
	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}

#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
{
	log_write_mmio(val, 64, addr);
	asm volatile("str %x0, [%1]" : : "rZ" (val), "r" (addr));
}

#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr);
	asm volatile(ALTERNATIVE("ldrb %w0, [%1]",
				 "ldarb %w0, [%1]",
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));
	log_post_read_mmio(val, 8, addr);
	return val;
}

#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr);
	asm volatile(ALTERNATIVE("ldrh %w0, [%1]",
				 "ldarh %w0, [%1]",
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));
	log_post_read_mmio(val, 16, addr);
	return val;
}

#define __raw_readl __raw_readl
static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr);
	asm volatile(ALTERNATIVE("ldr %w0, [%1]",
				 "ldar %w0, [%1]",
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));
	log_post_read_mmio(val, 32, addr);
	return val;
}

#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr);
	asm volatile(ALTERNATIVE("ldr %0, [%1]",
				 "ldar %0, [%1]",
				 ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE)
		     : "=r" (val) : "r" (addr));
	log_post_read_mmio(val, 64, addr);
	return val;
}
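
/*
 * Illustrative sketch (not part of this header): the __raw_* accessors
 * above perform a single native-endian access with no barriers and no
 * byte-swapping, so back-to-back device accesses get Device-memory
 * ordering only. REG_A and REG_B are hypothetical register offsets:
 *
 *	__raw_writel(val, base + REG_A);
 *	v = __raw_readl(base + REG_B);	// not ordered against Normal memory
 */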

/* IO barriers */
#define __iormb(v)							\
({									\
	unsigned long tmp;						\
									\
	dma_rmb();							\
									\
	/*								\
	 * Create a dummy control dependency from the IO read to any	\
	 * later instructions. This ensures that a subsequent call to	\
	 * udelay() will be ordered due to the ISB in get_cycles().	\
	 */								\
	asm volatile("eor %0, %1, %1\n"					\
		     "cbnz %0, ."					\
		     : "=r" (tmp) : "r" ((unsigned long)(v))		\
		     : "memory");					\
})

#define __io_par(v)		__iormb(v)
#define __iowmb()		dma_wmb()
#define __iomb()		dma_mb()
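
/*
 * Sketch of the guarantee described in __iormb() above (STATUS is a
 * hypothetical register offset): because the cooked read accessors pass
 * the value they read into __iormb(), a later udelay() cannot begin
 * until the device read has actually completed:
 *
 *	status = readl(regs + STATUS);	// readl() ends with __iormb(status)
 *	udelay(10);			// ordered after the read returns
 */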

/*
 * Relaxed I/O memory access primitives. These follow the Device memory
 * ordering rules but do not guarantee any ordering relative to Normal memory
 * accesses.
 */
#define readb_relaxed(c)	({ u8  __r = __raw_readb(c); __r; })
#define readw_relaxed(c)	({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
#define readl_relaxed(c)	({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
#define readq_relaxed(c)	({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((v),(c)))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)cpu_to_le16(v),(c)))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)cpu_to_le32(v),(c)))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)cpu_to_le64(v),(c)))
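
/*
 * Illustrative use (FIFO_DATA is a made-up offset): bulk transfers that
 * only need Device-memory ordering can use the relaxed variants and pay
 * for a single ordered access (see below) at the end:
 *
 *	for (i = 0; i < n; i++)
 *		writel_relaxed(buf[i], regs + FIFO_DATA);
 */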

/*
 * I/O memory access primitives. Reads are ordered relative to any
 * following Normal memory access. Writes are ordered relative to any prior
 * Normal memory access.
 */
#define readb(c)		({ u8  __v = readb_relaxed(c); __iormb(__v); __v; })
#define readw(c)		({ u16 __v = readw_relaxed(c); __iormb(__v); __v; })
#define readl(c)		({ u32 __v = readl_relaxed(c); __iormb(__v); __v; })
#define readq(c)		({ u64 __v = readq_relaxed(c); __iormb(__v); __v; })

#define writeb(v,c)		({ __iowmb(); writeb_relaxed((v),(c)); })
#define writew(v,c)		({ __iowmb(); writew_relaxed((v),(c)); })
#define writel(v,c)		({ __iowmb(); writel_relaxed((v),(c)); })
#define writeq(v,c)		({ __iowmb(); writeq_relaxed((v),(c)); })
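
/*
 * Typical pattern these guarantees exist for (a sketch; desc, DOORBELL
 * and the layout are made up): fill in a DMA descriptor in Normal
 * memory, then ring the device. The __iowmb() in writel() orders the
 * descriptor stores before the doorbell write:
 *
 *	desc->addr = cpu_to_le64(dma_addr);
 *	desc->len  = cpu_to_le32(len);
 *	writel(1, regs + DOORBELL);
 */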

/*
 * I/O port access primitives.
 */
#define arch_has_dev_port()	(1)
#define IO_SPACE_LIMIT		(PCI_IO_SIZE - 1)
#define PCI_IOBASE		((void __iomem *)PCI_IO_START)
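
/*
 * With PCI_IOBASE defined, the port accessors picked up from
 * <asm-generic/io.h> below (inb(), outb() and friends) translate a port
 * number into an MMIO access at PCI_IOBASE + port. Illustrative only;
 * 0x3f8 is just an example port number:
 *
 *	u8 lsr = inb(0x3f8 + 5);
 */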

/*
 * String version of I/O memory access operations.
 */
extern void __memcpy_fromio(void *, const volatile void __iomem *, size_t);
extern void __memcpy_toio(volatile void __iomem *, const void *, size_t);
extern void __memset_io(volatile void __iomem *, int, size_t);

#define memset_io(c,v,l)	__memset_io((c),(v),(l))
#define memcpy_fromio(a,c,l)	__memcpy_fromio((a),(c),(l))
#define memcpy_toio(c,a,l)	__memcpy_toio((c),(a),(l))
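
/*
 * Example (illustrative; DATA_WINDOW is hypothetical): these routines
 * copy through __iomem pointers using accesses that are safe on Device
 * memory, where a plain memcpy() is not:
 *
 *	memcpy_fromio(host_buf, regs + DATA_WINDOW, len);
 */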

/*
 * I/O memory mapping functions.
 */
extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot);
extern void iounmap(volatile void __iomem *addr);
extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);

#define ioremap(addr, size)		__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
#define ioremap_wc(addr, size)		__ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
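
/*
 * Sketch of the mapping lifetime (DEV_PHYS is a made-up physical
 * address): map a device as nGnRE Device memory, access it with the
 * accessors above, then tear the mapping down:
 *
 *	void __iomem *base = ioremap(DEV_PHYS, SZ_4K);
 *	if (base) {
 *		u32 id = readl(base);
 *		iounmap(base);
 *	}
 */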

/*
 * PCI configuration space mapping function.
 *
 * The PCI specification disallows posted write configuration transactions.
 * Add an arch specific pci_remap_cfgspace() definition that is implemented
 * through nGnRnE device memory attribute as recommended by the ARM v8
 * Architecture reference manual Issue A.k B2.8.2 "Device memory".
 */
#define pci_remap_cfgspace(addr, size)	__ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRnE))
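
/*
 * Illustrative use (cfg_phys and cfg_size are made up): PCI host bridge
 * drivers map configuration space with pci_remap_cfgspace() so that the
 * nGnRnE attribute keeps config writes non-posted:
 *
 *	void __iomem *cfg = pci_remap_cfgspace(cfg_phys, cfg_size);
 */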

/*
 * io{read,write}{16,32,64}be() macros
 */
#define ioread16be(p)		({ __u16 __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(__v); __v; })
#define ioread32be(p)		({ __u32 __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(__v); __v; })
#define ioread64be(p)		({ __u64 __v = be64_to_cpu((__force __be64)__raw_readq(p)); __iormb(__v); __v; })

#define iowrite16be(v,p)	({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
#define iowrite32be(v,p)	({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
#define iowrite64be(v,p)	({ __iowmb(); __raw_writeq((__force __u64)cpu_to_be64(v), p); })
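
/*
 * Example (illustrative; BE_STATUS is hypothetical): reading a register
 * from a big-endian device, with the byte swap and the read barrier both
 * handled by ioread32be():
 *
 *	u32 status = ioread32be(regs + BE_STATUS);
 */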

#include <asm-generic/io.h>

/*
 * More restrictive address range checking than the default implementation
 * (PHYS_OFFSET and PHYS_MASK taken into account).
 */
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

extern int devmem_is_allowed(unsigned long pfn);

#endif /* __ASM_IO_H */