Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for the Orange Pi 5/5B/5+ boards

SuperH <asm/io.h> from this tree, shown as git blame output: every line is attributed to commit 8f3ce5b39 (kx, 2023-10-28 12:00:06 +0300).

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 */
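
/*
 * Illustrative sketch, not part of the original header: typical driver
 * usage of the accessors declared here on an ioremap()'d MMIO window.
 * The physical base and register offsets are hypothetical.
 */
#if 0
static void example_mmio_usage(void)
{
	void __iomem *regs = ioremap(0xfe000000, 0x100);	/* hypothetical device */
	u32 status;

	writel(0x1, regs + 0x00);		/* wmb(), then the store */
	status = readl(regs + 0x04);		/* the load, then rmb() */
	(void)status;
	(void)__raw_readl(regs + 0x04);		/* no barrier, no byte-order mangling */
	iounmap(regs);
}
#endif
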
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/page.h>
#include <linux/pgtable.h>
#include <asm-generic/iomap.h>

#define __IO_PREFIX     generic
#include <asm/io_generic.h>
#include <asm-generic/pci_iomap.h>
#include <mach/mangle-port.h>

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

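/*
 * The _relaxed accessors apply the platform byte-order mangling hooks
 * (ioswab{b,w,l,q}, pulled in via <mach/mangle-port.h>) but add no
 * memory barriers.
 */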
#define readb_relaxed(c)	({ u8  __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c)	({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = ioswabq(__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((__force  u8)ioswabb(v),c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)ioswabq(v),c))

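/*
 * Fully ordered accessors: a read is the _relaxed access followed by
 * rmb(), a write is wmb() followed by the _relaxed access.
 */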
#define readb(a)		({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)		({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)		({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)		({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)		({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)		({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)		({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)		({ wmb(); writeq_relaxed((v),(a)); })

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)

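/*
 * __BUILD_UNCACHED_IO() generates read{b,w,l,q}_uncached() and
 * write{b,w,l,q}_uncached() helpers that wrap a single __raw access
 * between jump_to_uncached() and back_to_cached().
 */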
#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)

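/*
 * __BUILD_MEMORY_STRING() generates the pfx##reads##bwlq() and
 * pfx##writes##bwlq() string accessors: 'count' repeated accesses to the
 * fixed MMIO location 'mem', copying to or from the memory buffer at
 * 'addr'. It is instantiated for the b, w and q sizes below; the 32-bit
 * (l) variants are out-of-line functions declared further down.
 */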
#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,		\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);

__BUILD_MEMORY_STRING(__raw_, q, u64)

#ifdef CONFIG_HAS_IOPORT_MAP

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are being mapped.
 */
extern unsigned long sh_io_port_base;

static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}
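
/*
 * Illustrative sketch, not part of the original header: once the port
 * window has been set up, legacy-style port I/O is just a load/store
 * into the mapping returned by __ioport_map(). The base address and
 * port number below are hypothetical.
 */
#if 0
static void example_port_io(void)
{
	__set_io_port_base(0xfe200000UL);	/* hypothetical virtual base */
	outb(0x10, 0x3f8);			/* store into the mapped window */
	(void)inb(0x3f8);			/* load back from the same port */
}
#endif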

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

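/*
 * __BUILD_IOPORT_SINGLE() generates one {in,out}##bwlq##p() pair: the
 * port is mapped with __ioport_map() and accessed with a single volatile
 * load/store; the _p instantiation additionally expands SLOW_DOWN_IO
 * after the access.
 */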
#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)

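/*
 * __BUILD_IOPORT_STRING() generates ins{b,w,l,q}()/outs{b,w,l,q}(): the
 * single-register in/out accessor is repeated 'count' times against a
 * fixed port while walking the memory buffer at 'addr'.
 */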
#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)

#else /* !CONFIG_HAS_IOPORT_MAP */

#include <asm/io_noioport.h>

#endif


#define IO_SPACE_LIMIT 0xffffffff

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

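/*
 * With an MMU, ioremap() creates an uncached mapping of the physical
 * range via __ioremap_caller() (PAGE_KERNEL_NOCACHE), while
 * ioremap_cache() requests a normal cached mapping (PAGE_KERNEL).
 * Without an MMU the physical address is simply cast to an __iomem
 * pointer and iounmap() is a no-op.
 */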
#ifdef CONFIG_MMU
void iounmap(void __iomem *addr);
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_caller(offset, size, PAGE_KERNEL_NOCACHE,
			__builtin_return_address(0));
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_caller(offset, size, PAGE_KERNEL,
			__builtin_return_address(0));
}
#define ioremap_cache ioremap_cache

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *ioremap_prot(phys_addr_t offset, unsigned long size,
		unsigned long flags)
{
	return __ioremap_caller(offset, size, __pgprot(flags),
			__builtin_return_address(0));
}
#endif /* CONFIG_HAVE_IOREMAP_PROT */

#else /* CONFIG_MMU */
#define iounmap(addr)		do { } while (0)
#define ioremap(offset, size)	((void __iomem *)(unsigned long)(offset))
#endif /* CONFIG_MMU */

#define ioremap_uc	ioremap

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(phys_addr_t addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __ASM_SH_IO_H */