Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0
/*
 * Alpha IO and memory functions.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
#include <asm/io.h>

/* Out-of-line versions of the i/o routines that redirect into the
   platform-specific version.  Note that "platform-specific" may mean
   "generic", which bumps through the machine vector.  */

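/*
 * Illustrative note: IO_CONCAT() pastes __IO_PREFIX onto the routine name,
 * so with the default machine-vector prefix a call such as
 *
 *	IO_CONCAT(__IO_PREFIX,ioread8)(addr)
 *
 * expands to something like generic_ioread8(addr), which dispatches through
 * the per-platform machine vector.  The exact prefix and expanded name
 * depend on which <asm/io.h>/<asm/core_*.h> variant is in effect; the
 * expansion shown here is an assumption for illustration only.
 */
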
unsigned int
ioread8(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
	mb();
	return ret;
}

unsigned int ioread16(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
	mb();
	return ret;
}

unsigned int ioread32(const void __iomem *addr)
{
	unsigned int ret;
	mb();
	ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
	mb();
	return ret;
}

void iowrite8(u8 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
}

void iowrite16(u16 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
}

void iowrite32(u32 b, void __iomem *addr)
{
	mb();
	IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
}

EXPORT_SYMBOL(ioread8);
EXPORT_SYMBOL(ioread16);
EXPORT_SYMBOL(ioread32);
EXPORT_SYMBOL(iowrite8);
EXPORT_SYMBOL(iowrite16);
EXPORT_SYMBOL(iowrite32);

u8 inb(unsigned long port)
{
	return ioread8(ioport_map(port, 1));
}

u16 inw(unsigned long port)
{
	return ioread16(ioport_map(port, 2));
}

u32 inl(unsigned long port)
{
	return ioread32(ioport_map(port, 4));
}

void outb(u8 b, unsigned long port)
{
	iowrite8(b, ioport_map(port, 1));
}

void outw(u16 b, unsigned long port)
{
	iowrite16(b, ioport_map(port, 2));
}

void outl(u32 b, unsigned long port)
{
	iowrite32(b, ioport_map(port, 4));
}

EXPORT_SYMBOL(inb);
EXPORT_SYMBOL(inw);
EXPORT_SYMBOL(inl);
EXPORT_SYMBOL(outb);
EXPORT_SYMBOL(outw);
EXPORT_SYMBOL(outl);
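
/*
 * Usage sketch (illustrative only): the legacy port helpers above are thin
 * wrappers around ioport_map() plus the ioreadN()/iowriteN() accessors, so
 * a driver talking to a hypothetical device at I/O port 0x200 could use
 * either style:
 *
 *	u8 v = inb(0x200);
 *	outb(v | 0x01, 0x200);
 *
 *	void __iomem *p = ioport_map(0x200, 8);
 *	if (p) {
 *		u8 w = ioread8(p);
 *		iowrite8(w | 0x01, p);
 *		ioport_unmap(p);
 *	}
 *
 * The port number and register layout are assumptions made up for the
 * example.
 */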

u8 __raw_readb(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readb)(addr);
}

u16 __raw_readw(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readw)(addr);
}

u32 __raw_readl(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readl)(addr);
}

u64 __raw_readq(const volatile void __iomem *addr)
{
	return IO_CONCAT(__IO_PREFIX,readq)(addr);
}

void __raw_writeb(u8 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
}

void __raw_writew(u16 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writew)(b, addr);
}

void __raw_writel(u32 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writel)(b, addr);
}

void __raw_writeq(u64 b, volatile void __iomem *addr)
{
	IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
}

EXPORT_SYMBOL(__raw_readb);
EXPORT_SYMBOL(__raw_readw);
EXPORT_SYMBOL(__raw_readl);
EXPORT_SYMBOL(__raw_readq);
EXPORT_SYMBOL(__raw_writeb);
EXPORT_SYMBOL(__raw_writew);
EXPORT_SYMBOL(__raw_writel);
EXPORT_SYMBOL(__raw_writeq);

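/*
 * Illustrative note: the __raw_* accessors above issue no memory barriers,
 * while readb()/writeb() and friends below wrap the same platform hooks in
 * mb().  A rough sketch of the split, with a hypothetical device:
 *
 *	writel(1, regs + DOORBELL);		// ordered MMIO write
 *	while (!(readl(regs + STATUS) & DONE))	// ordered MMIO polling
 *		cpu_relax();
 *
 * whereas a diagnostic register dump that needs no ordering guarantees
 * could loop over __raw_readl() instead.  regs, DOORBELL, STATUS and DONE
 * are assumptions made up for the example.
 */
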
u8 readb(const volatile void __iomem *addr)
{
	u8 ret;
	mb();
	ret = __raw_readb(addr);
	mb();
	return ret;
}

u16 readw(const volatile void __iomem *addr)
{
	u16 ret;
	mb();
	ret = __raw_readw(addr);
	mb();
	return ret;
}

u32 readl(const volatile void __iomem *addr)
{
	u32 ret;
	mb();
	ret = __raw_readl(addr);
	mb();
	return ret;
}

u64 readq(const volatile void __iomem *addr)
{
	u64 ret;
	mb();
	ret = __raw_readq(addr);
	mb();
	return ret;
}

void writeb(u8 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeb(b, addr);
}

void writew(u16 b, volatile void __iomem *addr)
{
	mb();
	__raw_writew(b, addr);
}

void writel(u32 b, volatile void __iomem *addr)
{
	mb();
	__raw_writel(b, addr);
}

void writeq(u64 b, volatile void __iomem *addr)
{
	mb();
	__raw_writeq(b, addr);
}

EXPORT_SYMBOL(readb);
EXPORT_SYMBOL(readw);
EXPORT_SYMBOL(readl);
EXPORT_SYMBOL(readq);
EXPORT_SYMBOL(writeb);
EXPORT_SYMBOL(writew);
EXPORT_SYMBOL(writel);
EXPORT_SYMBOL(writeq);

/*
 * The _relaxed functions must be ordered w.r.t. each other, but they don't
 * have to be ordered w.r.t. other memory accesses.
 */
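/*
 * Sketch of the intended use (illustrative only): a driver can poll a
 * device register with the _relaxed variants when it does not need
 * ordering against normal memory, adding an explicit barrier only where
 * it matters, e.g.
 *
 *	while (!(readl_relaxed(regs + STATUS) & READY))
 *		cpu_relax();
 *	rmb();		// order the flag read before touching the data
 *	process(buf);
 *
 * regs, STATUS, READY, buf and process() are assumptions for the example.
 */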
u8 readb_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readb(addr);
}

u16 readw_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readw(addr);
}

u32 readl_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readl(addr);
}

u64 readq_relaxed(const volatile void __iomem *addr)
{
	mb();
	return __raw_readq(addr);
}

EXPORT_SYMBOL(readb_relaxed);
EXPORT_SYMBOL(readw_relaxed);
EXPORT_SYMBOL(readl_relaxed);
EXPORT_SYMBOL(readq_relaxed);

/*
 * Read COUNT 8-bit bytes from port PORT into memory starting at DST.
 */
void ioread8_rep(const void __iomem *port, void *dst, unsigned long count)
{
	while ((unsigned long)dst & 0x3) {
		if (!count)
			return;
		count--;
		*(unsigned char *)dst = ioread8(port);
		dst += 1;
	}

	while (count >= 4) {
		unsigned int w;
		count -= 4;
		w = ioread8(port);
		w |= ioread8(port) << 8;
		w |= ioread8(port) << 16;
		w |= ioread8(port) << 24;
		*(unsigned int *)dst = w;
		dst += 4;
	}

	while (count) {
		--count;
		*(unsigned char *)dst = ioread8(port);
		dst += 1;
	}
}

void insb(unsigned long port, void *dst, unsigned long count)
{
	ioread8_rep(ioport_map(port, 1), dst, count);
}

EXPORT_SYMBOL(ioread8_rep);
EXPORT_SYMBOL(insb);
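
/*
 * Usage sketch (illustrative only): draining a byte-wide data FIFO into a
 * buffer with the string accessors above, e.g.
 *
 *	u8 buf[64];
 *
 *	insb(DATA_PORT, buf, sizeof(buf));		// legacy port number
 *	ioread8_rep(base + DATA_REG, buf, sizeof(buf));	// mapped cookie
 *
 * DATA_PORT, DATA_REG and base are assumptions made up for the example.
 */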

/*
 * Read COUNT 16-bit words from port PORT into memory starting at
 * DST.  DST must be at least short aligned.  This is used by the
 * IDE driver to read disk sectors.  Performance is important, but
 * the interface seems to be slow: just using the inlined version
 * of inw() breaks things.
 */
void ioread16_rep(const void __iomem *port, void *dst, unsigned long count)
{
	if (unlikely((unsigned long)dst & 0x3)) {
		if (!count)
			return;
		BUG_ON((unsigned long)dst & 0x1);
		count--;
		*(unsigned short *)dst = ioread16(port);
		dst += 2;
	}

	while (count >= 2) {
		unsigned int w;
		count -= 2;
		w = ioread16(port);
		w |= ioread16(port) << 16;
		*(unsigned int *)dst = w;
		dst += 4;
	}

	if (count) {
		*(unsigned short *)dst = ioread16(port);
	}
}

void insw(unsigned long port, void *dst, unsigned long count)
{
	ioread16_rep(ioport_map(port, 2), dst, count);
}

EXPORT_SYMBOL(ioread16_rep);
EXPORT_SYMBOL(insw);


/*
 * Read COUNT 32-bit words from port PORT into memory starting at
 * DST.  Now works with any alignment in DST.  Performance is important,
 * but the interface seems to be slow: just using the inlined version
 * of inl() breaks things.
 */
void ioread32_rep(const void __iomem *port, void *dst, unsigned long count)
{
	if (unlikely((unsigned long)dst & 0x3)) {
		while (count--) {
			struct S { int x __attribute__((packed)); };
			((struct S *)dst)->x = ioread32(port);
			dst += 4;
		}
	} else {
		/* Buffer 32-bit aligned.  */
		while (count--) {
			*(unsigned int *)dst = ioread32(port);
			dst += 4;
		}
	}
}

void insl(unsigned long port, void *dst, unsigned long count)
{
	ioread32_rep(ioport_map(port, 4), dst, count);
}

EXPORT_SYMBOL(ioread32_rep);
EXPORT_SYMBOL(insl);

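/*
 * Illustrative note: the packed-struct cast above is a portable way to ask
 * the compiler for an unaligned 32-bit store.  With the generic helpers the
 * same thing could be written roughly as
 *
 *	put_unaligned(ioread32(port), (u32 *)dst);
 *
 * using put_unaligned() from <asm/unaligned.h>; that rewrite is an
 * assumption for illustration, not what this file does.
 */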

/*
 * Like insb but in the opposite direction.
 * Don't worry as much about doing aligned memory transfers:
 * doing byte reads the "slow" way isn't nearly as slow as
 * doing byte writes the slow way (no r-m-w cycle).
 */
void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
{
	const unsigned char *src = xsrc;
	while (count--)
		iowrite8(*src++, port);
}

void outsb(unsigned long port, const void *src, unsigned long count)
{
	iowrite8_rep(ioport_map(port, 1), src, count);
}

EXPORT_SYMBOL(iowrite8_rep);
EXPORT_SYMBOL(outsb);


/*
 * Like insw but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Performance is important, but the
 * interface seems to be slow: just using the inlined version of
 * outw() breaks things.
 */
void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
{
	if (unlikely((unsigned long)src & 0x3)) {
		if (!count)
			return;
		BUG_ON((unsigned long)src & 0x1);
		iowrite16(*(unsigned short *)src, port);
		src += 2;
		--count;
	}

	while (count >= 2) {
		unsigned int w;
		count -= 2;
		w = *(unsigned int *)src;
		src += 4;
		iowrite16(w >>  0, port);
		iowrite16(w >> 16, port);
	}

	if (count) {
		iowrite16(*(unsigned short *)src, port);
	}
}

void outsw(unsigned long port, const void *src, unsigned long count)
{
	iowrite16_rep(ioport_map(port, 2), src, count);
}

EXPORT_SYMBOL(iowrite16_rep);
EXPORT_SYMBOL(outsw);


/*
 * Like insl but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Works with any alignment in SRC.
 * Performance is important, but the interface seems to be slow:
 * just using the inlined version of outl() breaks things.
 */
void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
{
	if (unlikely((unsigned long)src & 0x3)) {
		while (count--) {
			struct S { int x __attribute__((packed)); };
			iowrite32(((struct S *)src)->x, port);
			src += 4;
		}
	} else {
		/* Buffer 32-bit aligned.  */
		while (count--) {
			iowrite32(*(unsigned int *)src, port);
			src += 4;
		}
	}
}

void outsl(unsigned long port, const void *src, unsigned long count)
{
	iowrite32_rep(ioport_map(port, 4), src, count);
}

EXPORT_SYMBOL(iowrite32_rep);
EXPORT_SYMBOL(outsl);


/*
 * Copy data from IO memory space to "real" memory space.
 * This needs to be optimized.
 */
void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
	/* Optimize co-aligned transfers.  Everything else gets handled
	   a byte at a time. */

	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			*(u64 *)to = __raw_readq(from);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		count += 8;
	}

	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			*(u32 *)to = __raw_readl(from);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			*(u16 *)to = __raw_readw(from);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	while (count > 0) {
		*(u8 *) to = __raw_readb(from);
		count--;
		to++;
		from++;
	}
	mb();
}

EXPORT_SYMBOL(memcpy_fromio);

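/*
 * Usage sketch (illustrative only): a typical caller maps a region with
 * ioremap() and then copies in or out in bulk, e.g.
 *
 *	void __iomem *base = ioremap(phys_addr, SZ_4K);
 *
 *	if (base) {
 *		memcpy_fromio(host_buf, base + DATA_OFF, 256);
 *		memcpy_toio(base + DATA_OFF, host_buf, 256);
 *		iounmap(base);
 *	}
 *
 * phys_addr, DATA_OFF and host_buf are assumptions for the example;
 * SZ_4K comes from <linux/sizes.h>.
 */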

/*
 * Copy data from "real" memory space to IO memory space.
 * This needs to be optimized.
 */
void memcpy_toio(volatile void __iomem *to, const void *from, long count)
{
	/* Optimize co-aligned transfers.  Everything else gets handled
	   a byte at a time. */
	/* FIXME -- align FROM.  */

	if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
		count -= 8;
		do {
			__raw_writeq(*(const u64 *)from, to);
			count -= 8;
			to += 8;
			from += 8;
		} while (count >= 0);
		count += 8;
	}

	if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
		count -= 4;
		do {
			__raw_writel(*(const u32 *)from, to);
			count -= 4;
			to += 4;
			from += 4;
		} while (count >= 0);
		count += 4;
	}

	if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
		count -= 2;
		do {
			__raw_writew(*(const u16 *)from, to);
			count -= 2;
			to += 2;
			from += 2;
		} while (count >= 0);
		count += 2;
	}

	while (count > 0) {
		__raw_writeb(*(const u8 *) from, to);
		count--;
		to++;
		from++;
	}
	mb();
}

EXPORT_SYMBOL(memcpy_toio);


/*
 * "memset" on IO memory space.
 */
void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
{
	/* Handle any initial odd byte */
	if (count > 0 && ((u64)to & 1)) {
		__raw_writeb(c, to);
		to++;
		count--;
	}

	/* Handle any initial odd halfword */
	if (count >= 2 && ((u64)to & 2)) {
		__raw_writew(c, to);
		to += 2;
		count -= 2;
	}

	/* Handle any initial odd word */
	if (count >= 4 && ((u64)to & 4)) {
		__raw_writel(c, to);
		to += 4;
		count -= 4;
	}

	/* Handle all full-sized quadwords: we're aligned
	   (or have a small count) */
	count -= 8;
	if (count >= 0) {
		do {
			__raw_writeq(c, to);
			to += 8;
			count -= 8;
		} while (count >= 0);
	}
	count += 8;

	/* The tail is word-aligned if we still have count >= 4 */
	if (count >= 4) {
		__raw_writel(c, to);
		to += 4;
		count -= 4;
	}

	/* The tail is half-word aligned if we have count >= 2 */
	if (count >= 2) {
		__raw_writew(c, to);
		to += 2;
		count -= 2;
	}

	/* And finally, one last byte.. */
	if (count) {
		__raw_writeb(c, to);
	}
	mb();
}

EXPORT_SYMBOL(_memset_c_io);

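/*
 * Illustrative note: drivers normally reach this through the memset_io()
 * wrapper from <asm/io.h>, which replicates the fill byte across the
 * quadword before calling _memset_c_io() (described from memory here, so
 * treat the wrapper details as an assumption).  Typical use:
 *
 *	memset_io(fb_base + FB_OFF, 0, screen_size);
 *
 * fb_base, FB_OFF and screen_size are assumptions for the example.
 */
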
/* A version of memcpy used by the vga console routines to move data around
   arbitrarily between screen and main memory.  */

void
scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
{
	const u16 __iomem *ios = (const u16 __iomem *) s;
	u16 __iomem *iod = (u16 __iomem *) d;
	int s_isio = __is_ioaddr(s);
	int d_isio = __is_ioaddr(d);

	if (s_isio) {
		if (d_isio) {
			/* FIXME: Should handle unaligned ops and
			   operation widening.  */

			count /= 2;
			while (count--) {
				u16 tmp = __raw_readw(ios++);
				__raw_writew(tmp, iod++);
			}
		}
		else
			memcpy_fromio(d, ios, count);
	} else {
		if (d_isio)
			memcpy_toio(iod, s, count);
		else
			memcpy(d, s, count);
	}
}

EXPORT_SYMBOL(scr_memcpyw);

void __iomem *ioport_map(unsigned long port, unsigned int size)
{
	return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
}

void ioport_unmap(void __iomem *addr)
{
}

EXPORT_SYMBOL(ioport_map);
EXPORT_SYMBOL(ioport_unmap);