// SPDX-License-Identifier: GPL-2.0
/*
 * arch/parisc/lib/io.c
 *
 * Copyright (c) Matthew Wilcox 2001 for Hewlett-Packard
 * Copyright (c) Randolph Chung 2001 <tausq@debian.org>
 *
 * I/O access functions which shouldn't be inlined because they're too big
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/io.h>

/* Copies a block of memory to a device in an efficient manner.
 * Assumes the device can cope with 32-bit transfers. If it can't,
 * don't use this function.
 */
void memcpy_toio(volatile void __iomem *dst, const void *src, int count)
{
	if (((unsigned long)dst & 3) != ((unsigned long)src & 3))
		goto bytecopy;
	while ((unsigned long)dst & 3) {
		writeb(*(char *)src, dst++);
		src++;
		count--;
	}
	while (count > 3) {
		__raw_writel(*(u32 *)src, dst);
		src += 4;
		dst += 4;
		count -= 4;
	}
 bytecopy:
	while (count--) {
		writeb(*(char *)src, dst++);
		src++;
	}
}
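
/*
 * Illustrative usage sketch (not part of the original file; the base
 * address, offset and buffer size are assumptions for the example): a
 * driver that has mapped a device memory window with ioremap() could push
 * a staging buffer through memcpy_toio() like this.
 *
 *	void __iomem *regs = ioremap(DEV_PHYS_BASE, 0x1000);
 *	u8 staging[256];
 *
 *	if (regs) {
 *		memcpy_toio(regs + 0x100, staging, sizeof(staging));
 *		iounmap(regs);
 *	}
 */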

/*
** Copies a block of memory from a device in an efficient manner.
** Assumes the device can cope with 32-bit transfers. If it can't,
** don't use this function.
**
** CR16 counts on C3000 reading 256 bytes from Symbios 896 RAM:
**	27341/64   = 427 cyc per int
**	61311/128  = 478 cyc per short
**	122637/256 = 479 cyc per byte
** Ergo, bus latencies dominate (not transfer size).
** Minimize the total number of transfers at the cost of CPU cycles.
** TODO: only look at src alignment and adjust the stores to dest.
*/
void memcpy_fromio(void *dst, const volatile void __iomem *src, int count)
{
	/* first compare alignment of src/dst */
	if ( (((unsigned long)dst ^ (unsigned long)src) & 1) || (count < 2) )
		goto bytecopy;

	if ( (((unsigned long)dst ^ (unsigned long)src) & 2) || (count < 4) )
		goto shortcopy;

	/* Then check for misaligned start address */
	if ((unsigned long)src & 1) {
		*(u8 *)dst = readb(src);
		src++;
		dst++;
		count--;
		if (count < 2) goto bytecopy;
	}

	if ((unsigned long)src & 2) {
		*(u16 *)dst = __raw_readw(src);
		src += 2;
		dst += 2;
		count -= 2;
	}

	while (count > 3) {
		*(u32 *)dst = __raw_readl(src);
		dst += 4;
		src += 4;
		count -= 4;
	}

 shortcopy:
	while (count > 1) {
		*(u16 *)dst = __raw_readw(src);
		src += 2;
		dst += 2;
		count -= 2;
	}

 bytecopy:
	while (count--) {
		*(char *)dst = readb(src);
		src++;
		dst++;
	}
}
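
/*
 * Worked example of the alignment logic above (addresses and count chosen
 * purely for illustration): with src ending in ...2, dst ending in ...6
 * and count = 64, the two XOR tests pass (bits 0 and 1 agree), one
 * __raw_readw() aligns src to a 4-byte boundary, fifteen __raw_readl()
 * calls move 60 bytes, and the final halfword goes through shortcopy,
 * i.e. 17 bus transfers instead of 64.  With src ending in ...2 but dst
 * ending in ...1, bit 0 of (dst ^ src) is set, so the whole copy falls
 * back to bytecopy.
 */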

/* Sets a block of memory on a device to a given value.
 * Assumes the device can cope with 32-bit transfers. If it can't,
 * don't use this function.
 */
void memset_io(volatile void __iomem *addr, unsigned char val, int count)
{
	u32 val32 = (val << 24) | (val << 16) | (val << 8) | val;
	while ((unsigned long)addr & 3) {
		writeb(val, addr++);
		count--;
	}
	while (count > 3) {
		__raw_writel(val32, addr);
		addr += 4;
		count -= 4;
	}
	while (count--) {
		writeb(val, addr++);
	}
}
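
/*
 * Illustrative usage sketch (not from the original file; the mapping and
 * size are assumptions): a driver could clear a 4 KiB device buffer it has
 * mapped with ioremap() in one call.
 *
 *	void __iomem *buf = ioremap(DEV_BUF_PHYS, SZ_4K);
 *
 *	if (buf)
 *		memset_io(buf, 0, SZ_4K);
 */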

/*
 * Read COUNT 8-bit bytes from port PORT into memory starting at
 * DST.
 */
void insb (unsigned long port, void *dst, unsigned long count)
{
	unsigned char *p;

	p = (unsigned char *)dst;

	while (((unsigned long)p) & 0x3) {
		if (!count)
			return;
		count--;
		*p = inb(port);
		p++;
	}

	while (count >= 4) {
		unsigned int w;
		count -= 4;
		w = inb(port) << 24;
		w |= inb(port) << 16;
		w |= inb(port) << 8;
		w |= inb(port);
		*(unsigned int *) p = w;
		p += 4;
	}

	while (count) {
		--count;
		*p = inb(port);
		p++;
	}
}
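
/*
 * Illustrative usage sketch (the port symbol and length are assumptions
 * for the example, not values from this file): draining a byte-wide data
 * register into a kernel buffer.
 *
 *	u8 buf[64];
 *
 *	insb(DEV_DATA_PORT, buf, sizeof(buf));
 *
 * Note that insb() packs four successive port reads into one aligned
 * 32-bit store once the destination pointer reaches a word boundary.
 */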


/*
 * Read COUNT 16-bit words from port PORT into memory starting at
 * DST.  DST must be at least short aligned.  This is used by the
 * IDE driver to read disk sectors.  Performance is important, but
 * the interface seems to be slow: just using the inlined version
 * of inw() breaks things.
 */
void insw (unsigned long port, void *dst, unsigned long count)
{
	unsigned int l = 0, l2;
	unsigned char *p;

	p = (unsigned char *)dst;

	if (!count)
		return;

	switch (((unsigned long)p) & 0x3)
	{
	case 0x00:	/* Buffer 32-bit aligned */
		while (count >= 2) {
			count -= 2;
			l = cpu_to_le16(inw(port)) << 16;
			l |= cpu_to_le16(inw(port));
			*(unsigned int *)p = l;
			p += 4;
		}
		if (count) {
			*(unsigned short *)p = cpu_to_le16(inw(port));
		}
		break;

	case 0x02:	/* Buffer 16-bit aligned */
		*(unsigned short *)p = cpu_to_le16(inw(port));
		p += 2;
		count--;
		while (count >= 2) {
			count -= 2;
			l = cpu_to_le16(inw(port)) << 16;
			l |= cpu_to_le16(inw(port));
			*(unsigned int *)p = l;
			p += 4;
		}
		if (count) {
			*(unsigned short *)p = cpu_to_le16(inw(port));
		}
		break;

	case 0x01:	/* Buffer 8-bit aligned */
	case 0x03:
		/* I don't bother with 32bit transfers
		 * in this case, 16bit will have to do -- DE */
		--count;

		l = cpu_to_le16(inw(port));
		*p = l >> 8;
		p++;
		while (count--)
		{
			l2 = cpu_to_le16(inw(port));
			*(unsigned short *)p = (l & 0xff) << 8 | (l2 >> 8);
			p += 2;
			l = l2;
		}
		*p = l & 0xff;
		break;
	}
}
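
/*
 * Illustrative usage sketch, in the spirit of the IDE use mentioned above
 * (the port value is the legacy primary ATA data port, an assumption for
 * the example): reading one 512-byte sector as 256 16-bit words.
 *
 *	u16 sector[256];
 *
 *	insw(0x1f0, sector, 256);
 *
 * Keeping the buffer 32-bit aligned lets the case 0x00 path above combine
 * each pair of port reads into a single aligned 32-bit store.
 */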


/*
 * Read COUNT 32-bit words from port PORT into memory starting at
 * DST.  Now works with any alignment in DST.  Performance is important,
 * but the interface seems to be slow: just using the inlined version
 * of inl() breaks things.
 */
void insl (unsigned long port, void *dst, unsigned long count)
{
	unsigned int l = 0, l2;
	unsigned char *p;

	p = (unsigned char *)dst;

	if (!count)
		return;

	switch (((unsigned long) dst) & 0x3)
	{
	case 0x00:	/* Buffer 32-bit aligned */
		while (count--)
		{
			*(unsigned int *)p = cpu_to_le32(inl(port));
			p += 4;
		}
		break;

	case 0x02:	/* Buffer 16-bit aligned */
		--count;

		l = cpu_to_le32(inl(port));
		*(unsigned short *)p = l >> 16;
		p += 2;

		while (count--)
		{
			l2 = cpu_to_le32(inl(port));
			*(unsigned int *)p = (l & 0xffff) << 16 | (l2 >> 16);
			p += 4;
			l = l2;
		}
		*(unsigned short *)p = l & 0xffff;
		break;
	case 0x01:	/* Buffer 8-bit aligned */
		--count;

		l = cpu_to_le32(inl(port));
		*(unsigned char *)p = l >> 24;
		p++;
		*(unsigned short *)p = (l >> 8) & 0xffff;
		p += 2;
		while (count--)
		{
			l2 = cpu_to_le32(inl(port));
			*(unsigned int *)p = (l & 0xff) << 24 | (l2 >> 8);
			p += 4;
			l = l2;
		}
		*p = l & 0xff;
		break;
	case 0x03:	/* Buffer 8-bit aligned */
		--count;

		l = cpu_to_le32(inl(port));
		*p = l >> 24;
		p++;
		while (count--)
		{
			l2 = cpu_to_le32(inl(port));
			*(unsigned int *)p = (l & 0xffffff) << 8 | l2 >> 24;
			p += 4;
			l = l2;
		}
		*(unsigned short *)p = (l >> 8) & 0xffff;
		p += 2;
		*p = l & 0xff;
		break;
	}
}
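
/*
 * How the 16-bit aligned case above splices words (illustrative values
 * only): if the first inl() yields l = 0xAABBCCDD and the second yields
 * l2 = 0x11223344, the code stores the halfword 0xAABB, then the aligned
 * word 0xCCDD1122, carries l2 forward, and finally stores the halfword
 * 0x3344.  All intermediate stores are thus full aligned 32-bit stores
 * even though the destination buffer is only 16-bit aligned.
 */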


/*
 * Like insb but in the opposite direction.
 * Don't worry as much about doing aligned memory transfers:
 * doing byte reads the "slow" way isn't nearly as slow as
 * doing byte writes the slow way (no r-m-w cycle).
 */
void outsb(unsigned long port, const void * src, unsigned long count)
{
	const unsigned char *p;

	p = (const unsigned char *)src;
	while (count) {
		count--;
		outb(*p, port);
		p++;
	}
}

/*
 * Like insw but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Performance is important, but the
 * interface seems to be slow: just using the inlined version of
 * outw() breaks things.
 */
void outsw (unsigned long port, const void *src, unsigned long count)
{
	unsigned int l = 0, l2;
	const unsigned char *p;

	p = (const unsigned char *)src;

	if (!count)
		return;

	switch (((unsigned long)p) & 0x3)
	{
	case 0x00:	/* Buffer 32-bit aligned */
		while (count >= 2) {
			count -= 2;
			l = *(unsigned int *)p;
			p += 4;
			outw(le16_to_cpu(l >> 16), port);
			outw(le16_to_cpu(l & 0xffff), port);
		}
		if (count) {
			outw(le16_to_cpu(*(unsigned short *)p), port);
		}
		break;

	case 0x02:	/* Buffer 16-bit aligned */
		outw(le16_to_cpu(*(unsigned short *)p), port);
		p += 2;
		count--;

		while (count >= 2) {
			count -= 2;
			l = *(unsigned int *)p;
			p += 4;
			outw(le16_to_cpu(l >> 16), port);
			outw(le16_to_cpu(l & 0xffff), port);
		}
		if (count) {
			outw(le16_to_cpu(*(unsigned short *)p), port);
		}
		break;

	case 0x01:	/* Buffer 8-bit aligned */
		/* I don't bother with 32bit transfers
		 * in this case, 16bit will have to do -- DE */

		l = *p << 8;
		p++;
		count--;
		while (count)
		{
			count--;
			l2 = *(unsigned short *)p;
			p += 2;
			outw(le16_to_cpu(l | l2 >> 8), port);
			l = l2 << 8;
		}
		l2 = *(unsigned char *)p;
		outw(le16_to_cpu(l | l2 >> 8), port);
		break;

	}
}
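
/*
 * Illustrative usage sketch, mirroring the read example given for insw()
 * (the port value is the legacy primary ATA data port, an assumption for
 * the example): writing one 512-byte sector from a 16-bit aligned buffer.
 *
 *	u16 sector[256];
 *
 *	outsw(0x1f0, sector, 256);
 */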


/*
 * Like insl but in the opposite direction.  This is used by the IDE
 * driver to write disk sectors.  Works with any alignment in SRC.
 * Performance is important, but the interface seems to be slow:
 * just using the inlined version of outl() breaks things.
 */
void outsl (unsigned long port, const void *src, unsigned long count)
{
	unsigned int l = 0, l2;
	const unsigned char *p;

	p = (const unsigned char *)src;

	if (!count)
		return;

	switch (((unsigned long)p) & 0x3)
	{
	case 0x00:	/* Buffer 32-bit aligned */
		while (count--)
		{
			outl(le32_to_cpu(*(unsigned int *)p), port);
			p += 4;
		}
		break;

	case 0x02:	/* Buffer 16-bit aligned */
		--count;

		l = *(unsigned short *)p;
		p += 2;

		while (count--)
		{
			l2 = *(unsigned int *)p;
			p += 4;
			outl(le32_to_cpu(l << 16 | l2 >> 16), port);
			l = l2;
		}
		l2 = *(unsigned short *)p;
		outl(le32_to_cpu(l << 16 | l2), port);
		break;
	case 0x01:	/* Buffer 8-bit aligned */
		--count;

		l = *p << 24;
		p++;
		l |= *(unsigned short *)p << 8;
		p += 2;

		while (count--)
		{
			l2 = *(unsigned int *)p;
			p += 4;
			outl(le32_to_cpu(l | l2 >> 24), port);
			l = l2 << 8;
		}
		l2 = *p;
		outl(le32_to_cpu(l | l2), port);
		break;
	case 0x03:	/* Buffer 8-bit aligned */
		--count;

		l = *p << 24;
		p++;

		while (count--)
		{
			l2 = *(unsigned int *)p;
			p += 4;
			outl(le32_to_cpu(l | l2 >> 8), port);
			l = l2 << 24;
		}
		l2 = *(unsigned short *)p << 16;
		p += 2;
		l2 |= *p;
		outl(le32_to_cpu(l | l2), port);
		break;
	}
}

EXPORT_SYMBOL(insb);
EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
EXPORT_SYMBOL(outsb);
EXPORT_SYMBOL(outsw);
EXPORT_SYMBOL(outsl);