Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/alpha/include/asm/io.h:

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ALPHA_IO_H
#define __ALPHA_IO_H

#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <asm/compiler.h>
#include <asm/machvec.h>
#include <asm/hwrpb.h>

/* The generic header contains only prototypes.  Including it ensures that
   the implementation we have here matches that interface.  */
#include <asm-generic/iomap.h>

/* We don't use IO slowdowns on the Alpha, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

/*
 * Virtual -> physical identity mapping starts at this offset
 */
#ifdef USE_48_BIT_KSEG
#define IDENT_ADDR     0xffff800000000000UL
#else
#define IDENT_ADDR     0xfffffc0000000000UL
#endif

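/*
 * KSEG is a direct identity window: physical address P is reached
 * through the kernel virtual address IDENT_ADDR + P.  With the default
 * KSEG base, for example, physical 0x1000 is addressed as
 * 0xfffffc0000001000; virt_to_phys()/phys_to_virt() below simply undo
 * and apply that offset.
 */
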
/*
 * We try to avoid hae updates (thus the cache), but when we
 * do need to update the hae, we need to do it atomically, so
 * that any interrupts wouldn't get confused with the hae
 * register not being up-to-date with respect to the hardware
 * value.
 */
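/*
 * The HAE (Hardware Address Extension) register selects which portion
 * of the chipset's sparse I/O space is visible through the CPU's
 * limited address window; alpha_mv.hae_cache mirrors the last value
 * written so set_hae() can skip redundant hardware updates.
 */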
extern inline void __set_hae(unsigned long new_hae)
{
	unsigned long flags = swpipl(IPL_MAX);

	barrier();

	alpha_mv.hae_cache = new_hae;
	*alpha_mv.hae_register = new_hae;
	mb();
	/* Re-read to make sure it was written.  */
	new_hae = *alpha_mv.hae_register;

	setipl(flags);
	barrier();
}

extern inline void set_hae(unsigned long new_hae)
{
	if (new_hae != alpha_mv.hae_cache)
		__set_hae(new_hae);
}

/*
 * Change virtual addresses to physical addresses and vv.
 */
#ifdef USE_48_BIT_KSEG
static inline unsigned long virt_to_phys(volatile void *address)
{
	return (unsigned long)address - IDENT_ADDR;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *) (address + IDENT_ADDR);
}
#else
static inline unsigned long virt_to_phys(volatile void *address)
{
	unsigned long phys = (unsigned long)address;

	/* Sign-extend from bit 41.  */
	phys <<= (64 - 41);
	phys = (long)phys >> (64 - 41);

	/* Crop to the physical address width of the processor.  */
	phys &= (1ul << hwrpb->pa_bits) - 1;

	return phys;
}

static inline void * phys_to_virt(unsigned long address)
{
	return (void *)(IDENT_ADDR + (address & ((1ul << 41) - 1)));
}
#endif

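/*
 * Worked example for the non-48-bit-KSEG virt_to_phys() above: for a
 * KSEG pointer such as (void *)0xfffffc0000001000, the left shift and
 * arithmetic right shift by (64 - 41) keep only the low 41 bits
 * (sign-extended), giving 0x1000, which is then cropped to the CPU's
 * physical address width from hwrpb->pa_bits.
 */
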
#define page_to_phys(page)	page_to_pa(page)

/* Maximum PIO space address supported?  */
#define IO_SPACE_LIMIT 0xffff

/*
 * Change addresses as seen by the kernel (virtual) to addresses as
 * seen by a device (bus), and vice versa.
 *
 * Note that this only works for a limited range of kernel addresses,
 * and very well may not span all memory.  Consider this interface
 * deprecated in favour of the DMA-mapping API.
 */
extern unsigned long __direct_map_base;
extern unsigned long __direct_map_size;

static inline unsigned long __deprecated virt_to_bus(volatile void *address)
{
	unsigned long phys = virt_to_phys(address);
	unsigned long bus = phys + __direct_map_base;
	return phys <= __direct_map_size ? bus : 0;
}
#define isa_virt_to_bus virt_to_bus

static inline void * __deprecated bus_to_virt(unsigned long address)
{
	void *virt;

	/* This check is a sanity check but also ensures that bus address 0
	   maps to virtual address 0 which is useful to detect null pointers
	   (the NCR driver is much simpler if NULL pointers are preserved).  */
	address -= __direct_map_base;
	virt = phys_to_virt(address);
	return (long)address <= 0 ? NULL : virt;
}
#define isa_bus_to_virt bus_to_virt

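/*
 * As the comment above says, new code should use the DMA-mapping API
 * rather than virt_to_bus()/bus_to_virt().  A minimal sketch (the
 * device pointer, buffer and length are illustrative only):
 *
 *	#include <linux/dma-mapping.h>
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */
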
/*
 * There are different chipsets to interface the Alpha CPUs to the world.
 */

#define IO_CONCAT(a,b)	_IO_CONCAT(a,b)
#define _IO_CONCAT(a,b)	a ## _ ## b

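/*
 * The two-level definition lets the first argument (typically
 * __IO_PREFIX) be macro-expanded before the tokens are pasted, so with
 * __IO_PREFIX defined as generic, IO_CONCAT(__IO_PREFIX,ioread8)
 * expands to generic_ioread8.
 */
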
#ifdef CONFIG_ALPHA_GENERIC

/* In a generic kernel, we always go through the machine vector.  */

#define REMAP1(TYPE, NAME, QUAL)					\
static inline TYPE generic_##NAME(QUAL void __iomem *addr)		\
{									\
	return alpha_mv.mv_##NAME(addr);				\
}

#define REMAP2(TYPE, NAME, QUAL)					\
static inline void generic_##NAME(TYPE b, QUAL void __iomem *addr)	\
{									\
	alpha_mv.mv_##NAME(b, addr);					\
}

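/*
 * For example, REMAP1(unsigned int, ioread8, const) below expands to:
 *
 *	static inline unsigned int generic_ioread8(const void __iomem *addr)
 *	{
 *		return alpha_mv.mv_ioread8(addr);
 *	}
 */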
REMAP1(unsigned int, ioread8, const)
REMAP1(unsigned int, ioread16, const)
REMAP1(unsigned int, ioread32, const)
REMAP1(u8, readb, const volatile)
REMAP1(u16, readw, const volatile)
REMAP1(u32, readl, const volatile)
REMAP1(u64, readq, const volatile)

REMAP2(u8, iowrite8, /**/)
REMAP2(u16, iowrite16, /**/)
REMAP2(u32, iowrite32, /**/)
REMAP2(u8, writeb, volatile)
REMAP2(u16, writew, volatile)
REMAP2(u32, writel, volatile)
REMAP2(u64, writeq, volatile)

#undef REMAP1
#undef REMAP2

extern inline void __iomem *generic_ioportmap(unsigned long a)
{
	return alpha_mv.mv_ioportmap(a);
}

static inline void __iomem *generic_ioremap(unsigned long a, unsigned long s)
{
	return alpha_mv.mv_ioremap(a, s);
}

static inline void generic_iounmap(volatile void __iomem *a)
{
	return alpha_mv.mv_iounmap(a);
}

static inline int generic_is_ioaddr(unsigned long a)
{
	return alpha_mv.mv_is_ioaddr(a);
}

static inline int generic_is_mmio(const volatile void __iomem *a)
{
	return alpha_mv.mv_is_mmio(a);
}

#define __IO_PREFIX		generic
#define generic_trivial_rw_bw	0
#define generic_trivial_rw_lq	0
#define generic_trivial_io_bw	0
#define generic_trivial_io_lq	0
#define generic_trivial_iounmap	0

#else

#if defined(CONFIG_ALPHA_APECS)
# include <asm/core_apecs.h>
#elif defined(CONFIG_ALPHA_CIA)
# include <asm/core_cia.h>
#elif defined(CONFIG_ALPHA_IRONGATE)
# include <asm/core_irongate.h>
#elif defined(CONFIG_ALPHA_JENSEN)
# include <asm/jensen.h>
#elif defined(CONFIG_ALPHA_LCA)
# include <asm/core_lca.h>
#elif defined(CONFIG_ALPHA_MARVEL)
# include <asm/core_marvel.h>
#elif defined(CONFIG_ALPHA_MCPCIA)
# include <asm/core_mcpcia.h>
#elif defined(CONFIG_ALPHA_POLARIS)
# include <asm/core_polaris.h>
#elif defined(CONFIG_ALPHA_T2)
# include <asm/core_t2.h>
#elif defined(CONFIG_ALPHA_TSUNAMI)
# include <asm/core_tsunami.h>
#elif defined(CONFIG_ALPHA_TITAN)
# include <asm/core_titan.h>
#elif defined(CONFIG_ALPHA_WILDFIRE)
# include <asm/core_wildfire.h>
#else
#error "What system is this?"
#endif

#endif /* GENERIC */

/*
 * We always have external versions of these routines.
 */
extern u8		inb(unsigned long port);
extern u16		inw(unsigned long port);
extern u32		inl(unsigned long port);
extern void		outb(u8 b, unsigned long port);
extern void		outw(u16 b, unsigned long port);
extern void		outl(u32 b, unsigned long port);

extern u8		readb(const volatile void __iomem *addr);
extern u16		readw(const volatile void __iomem *addr);
extern u32		readl(const volatile void __iomem *addr);
extern u64		readq(const volatile void __iomem *addr);
extern void		writeb(u8 b, volatile void __iomem *addr);
extern void		writew(u16 b, volatile void __iomem *addr);
extern void		writel(u32 b, volatile void __iomem *addr);
extern void		writeq(u64 b, volatile void __iomem *addr);

extern u8		__raw_readb(const volatile void __iomem *addr);
extern u16		__raw_readw(const volatile void __iomem *addr);
extern u32		__raw_readl(const volatile void __iomem *addr);
extern u64		__raw_readq(const volatile void __iomem *addr);
extern void		__raw_writeb(u8 b, volatile void __iomem *addr);
extern void		__raw_writew(u16 b, volatile void __iomem *addr);
extern void		__raw_writel(u32 b, volatile void __iomem *addr);
extern void		__raw_writeq(u64 b, volatile void __iomem *addr);

/*
 * Mapping from port numbers to __iomem space is pretty easy.
 */

/* These two have to be extern inline because of the extern prototype from
   <asm-generic/iomap.h>.  It is not legal to mix "extern" and "static" for
   the same declaration.  */
extern inline void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

extern inline void ioport_unmap(void __iomem *addr)
{
}

static inline void __iomem *ioremap(unsigned long port, unsigned long size)
{
	return IO_CONCAT(__IO_PREFIX,ioremap) (port, size);
}

#define ioremap_wc ioremap
#define ioremap_uc ioremap

static inline void iounmap(volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,iounmap)(addr);
}

static inline int __is_ioaddr(unsigned long addr)
{
	return IO_CONCAT(__IO_PREFIX,is_ioaddr)(addr);
}
#define __is_ioaddr(a)		__is_ioaddr((unsigned long)(a))
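/*
 * The macro above shadows the inline function of the same name so that
 * callers may pass any pointer type; inside the macro body the name
 * __is_ioaddr is not expanded again, so it refers to the function.
 */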

static inline int __is_mmio(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,is_mmio)(addr);
}


/*
 * If the actual I/O bits are sufficiently trivial, then expand inline.
 */

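/*
 * The trivial_* flags come from whichever chipset header was included
 * above (the generic machine-vector build defines them all as 0).  When
 * a flag is 0, the corresponding accessors stay out of line, using the
 * extern versions declared earlier in this file.
 */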
#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline unsigned int ioread8(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

extern inline unsigned int ioread16(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

extern inline void iowrite8(u8 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite8)(b, addr);
}

extern inline void iowrite16(u16 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite16)(b, addr);
}

extern inline u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

extern inline u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

extern inline void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

extern inline void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline unsigned int ioread32(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

extern inline void iowrite32(u32 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX, iowrite32)(b, addr);
}

extern inline u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

extern inline void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_bw) == 1
extern inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

extern inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

extern inline void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

extern inline void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

extern inline u8 readb(const volatile void __iomem *addr)
{
	u8 ret;
	mb();
	ret = __raw_readb(addr);
	mb();
	return ret;
}

extern inline u16 readw(const volatile void __iomem *addr)
{
	u16 ret;
	mb();
	ret = __raw_readw(addr);
	mb();
	return ret;
}

extern inline void writeb(u8 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeb(b, addr);
}

extern inline void writew(u16 b, volatile void __iomem *addr)
{
	mb();
	__raw_writew(b, addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_rw_lq) == 1
extern inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

extern inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

extern inline void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

extern inline void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

extern inline u32 readl(const volatile void __iomem *addr)
{
	u32 ret;
	mb();
	ret = __raw_readl(addr);
	mb();
	return ret;
}

extern inline u64 readq(const volatile void __iomem *addr)
{
	u64 ret;
	mb();
	ret = __raw_readq(addr);
	mb();
	return ret;
}

extern inline void writel(u32 b, volatile void __iomem *addr)
{
	mb();
	__raw_writel(b, addr);
}

extern inline void writeq(u64 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeq(b, addr);
}
#endif

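/* Alpha MMIO accessors operate in the CPU's native little-endian byte
   order, so the big-endian variants simply byte-swap around them. */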
#define ioread16be(p) swab16(ioread16(p))
#define ioread32be(p) swab32(ioread32(p))
#define iowrite16be(v,p) iowrite16(swab16(v), (p))
#define iowrite32be(v,p) iowrite32(swab32(v), (p))

#define inb_p		inb
#define inw_p		inw
#define inl_p		inl
#define outb_p		outb
#define outw_p		outw
#define outl_p		outl

extern u8 readb_relaxed(const volatile void __iomem *addr);
extern u16 readw_relaxed(const volatile void __iomem *addr);
extern u32 readl_relaxed(const volatile void __iomem *addr);
extern u64 readq_relaxed(const volatile void __iomem *addr);

#if IO_CONCAT(__IO_PREFIX,trivial_io_bw)
extern inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readb(addr);
}

extern inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readw(addr);
}
#endif

#if IO_CONCAT(__IO_PREFIX,trivial_io_lq)
extern inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readl(addr);
}

extern inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readq(addr);
}
#endif

#define writeb_relaxed	writeb
#define writew_relaxed	writew
#define writel_relaxed	writel
#define writeq_relaxed	writeq

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *, const volatile void __iomem *, long);
extern void memcpy_toio(volatile void __iomem *, const void *, long);
extern void _memset_c_io(volatile void __iomem *, unsigned long, long);

static inline void memset_io(volatile void __iomem *addr, u8 c, long len)
{
	_memset_c_io(addr, 0x0101010101010101UL * c, len);
}

#define __HAVE_ARCH_MEMSETW_IO
static inline void memsetw_io(volatile void __iomem *addr, u16 c, long len)
{
	_memset_c_io(addr, 0x0001000100010001UL * c, len);
}

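/*
 * The multiplications above replicate the fill value across a 64-bit
 * word: c == 0xab gives 0x0101010101010101UL * 0xab == 0xabababababababab,
 * and a u16 c == 0x1234 gives 0x1234123412341234, so _memset_c_io()
 * receives the pattern as a full quadword.
 */
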
/*
 * String versions of in/out ops:
 */
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);

/*
 * The Alpha Jensen hardware for some rather strange reason puts
 * the RTC clock at 0x170 instead of 0x70. Probably due to some
 * misguided idea about using 0x70 for NMI stuff.
 *
 * These defines will override the defaults when doing RTC queries
 */

#ifdef CONFIG_ALPHA_GENERIC
# define RTC_PORT(x)	((x) + alpha_mv.rtc_port)
#else
# ifdef CONFIG_ALPHA_JENSEN
#  define RTC_PORT(x)	(0x170+(x))
# else
#  define RTC_PORT(x)	(0x70 + (x))
# endif
#endif
#define RTC_ALWAYS_BCD	0

/*
 * Some mucking forons use if[n]def writeq to check if platform has it.
 * It's a bloody bad idea and we probably want ARCH_HAS_WRITEQ for them
 * to play with; for now just use cpp anti-recursion logics and make sure
 * that damn thing is defined and expands to itself.
 */

#define writeq writeq
#define readq readq

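/*
 * For example, a driver doing
 *
 *	#ifndef writeq
 *	#define writeq(v, a)	...fallback...
 *	#endif
 *
 * will see writeq already defined here and keep this implementation.
 */
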
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#endif /* __KERNEL__ */

#endif /* __ALPHA_IO_H */