/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_IO_H
#define _ASM_IA64_IO_H

/*
 * This file contains the definitions for the emulated IO instructions
 * inb/inw/inl/outb/outw/outl and the "string versions" of the same
 * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
 * versions of the single-IO instructions (inb_p/inw_p/..).
 *
 * This file is not meant to be obfuscating: it's just complicated to
 * (a) handle it all in a way that lets gcc optimize it as well as
 * possible and (b) avoid writing the same thing over and over again
 * with slight variations, possibly making a mistake somewhere.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
 * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
 */

#include <asm/unaligned.h>
#include <asm/early_ioremap.h>

/* We don't use IO slowdowns on the ia64, but.. */
#define __SLOW_DOWN_IO	do { } while (0)
#define SLOW_DOWN_IO	do { } while (0)

#define __IA64_UNCACHED_OFFSET	RGN_BASE(RGN_UNCACHED)

/*
 * The legacy I/O space defined by the ia64 architecture supports only 65536 ports, but
 * large machines may have multiple other I/O spaces so we can't place any a priori limit
 * on IO_SPACE_LIMIT.  These additional spaces are described in ACPI.
 */
#define IO_SPACE_LIMIT		0xffffffffffffffffUL

#define MAX_IO_SPACES_BITS	8
#define MAX_IO_SPACES		(1UL << MAX_IO_SPACES_BITS)
#define IO_SPACE_BITS		24
#define IO_SPACE_SIZE		(1UL << IO_SPACE_BITS)

#define IO_SPACE_NR(port)	((port) >> IO_SPACE_BITS)
#define IO_SPACE_BASE(space)	((space) << IO_SPACE_BITS)
#define IO_SPACE_PORT(port)	((port) & (IO_SPACE_SIZE - 1))

#define IO_SPACE_SPARSE_ENCODING(p)	((((p) >> 2) << 12) | ((p) & 0xfff))
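
/*
 * Illustrative only, not part of the original header: a worked example of
 * the sparse encoding above for a hypothetical legacy port 0x3f8:
 *
 *	IO_SPACE_SPARSE_ENCODING(0x3f8)
 *		= ((0x3f8 >> 2) << 12) | (0x3f8 & 0xfff)
 *		= (0xfe << 12) | 0x3f8
 *		= 0xfe3f8
 *
 * i.e. the low 12 bits of the port are kept in place while the upper bits
 * are spread out onto 4KB-aligned chunks of the MMIO window.
 */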

struct io_space {
	unsigned long mmio_base;	/* base in MMIO space */
	int sparse;
};

extern struct io_space io_space[];
extern unsigned int num_io_spaces;

# ifdef __KERNEL__

/*
 * All MMIO iomem cookies are in region 6; anything less is a PIO cookie:
 *	0xCxxxxxxxxxxxxxxx	MMIO cookie (return from ioremap)
 *	0x000000001SPPPPPP	PIO cookie (S=space number, P..P=port)
 *
 * ioread/writeX() uses the leading 1 in PIO cookies (PIO_OFFSET) to catch
 * code that uses bare port numbers without the prerequisite pci_iomap().
 */
#define PIO_OFFSET		(1UL << (MAX_IO_SPACES_BITS + IO_SPACE_BITS))
#define PIO_MASK		(PIO_OFFSET - 1)
#define PIO_RESERVED		__IA64_UNCACHED_OFFSET
#define HAVE_ARCH_PIO_SIZE
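
/*
 * Illustrative only, not part of the original header: with the values
 * above, PIO_OFFSET is 1UL << (8 + 24) = 0x100000000, i.e. the leading
 * "1" sits just above the 8-bit space number (bits 24..31, see
 * IO_SPACE_NR()) and the 24-bit per-space port (bits 0..23, see
 * IO_SPACE_PORT()).  Any cookie below PIO_OFFSET therefore cannot be a
 * valid PIO cookie, which is how bare port numbers are caught.
 */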

#include <asm/intrinsics.h>
#include <asm/page.h>
#include <asm-generic/iomap.h>

/*
 * Change virtual addresses to physical addresses and vice versa.
 */
static inline unsigned long
virt_to_phys (volatile void *address)
{
	return (unsigned long) address - PAGE_OFFSET;
}
#define virt_to_phys virt_to_phys

static inline void*
phys_to_virt (unsigned long address)
{
	return (void *) (address + PAGE_OFFSET);
}
#define phys_to_virt phys_to_virt

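/*
 * Illustrative only, not part of the original header: these helpers assume
 * the address lies in the kernel's identity-mapped region, where virtual
 * and physical addresses differ by the constant PAGE_OFFSET.  Assuming the
 * usual ia64 PAGE_OFFSET of RGN_BASE(RGN_KERNEL) (0xe000000000000000):
 *
 *	virt_to_phys((void *) 0xe000000000100000) == 0x100000
 *	phys_to_virt(0x100000) == (void *) 0xe000000000100000
 */
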
#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
extern u64 kern_mem_attribute (unsigned long phys_addr, unsigned long size);
extern int valid_phys_addr_range (phys_addr_t addr, size_t count); /* efi.c */
extern int valid_mmap_phys_addr_range (unsigned long pfn, size_t count);

/*
 * The following macros are deprecated and scheduled for removal.
 * Please use the PCI-DMA interface defined in <asm/pci.h> instead.
 */
#define bus_to_virt	phys_to_virt
#define virt_to_bus	virt_to_phys
#define page_to_bus	page_to_phys

# endif /* __KERNEL__ */

/*
 * Memory fence w/accept.  This should never be used in code that is
 * not IA-64 specific.
 */
#define __ia64_mf_a()	ia64_mfa()

static inline void*
__ia64_mk_io_addr (unsigned long port)
{
	struct io_space *space;
	unsigned long offset;

	space = &io_space[IO_SPACE_NR(port)];
	port = IO_SPACE_PORT(port);
	if (space->sparse)
		offset = IO_SPACE_SPARSE_ENCODING(port);
	else
		offset = port;

	return (void *) (space->mmio_base | offset);
}
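
/*
 * Illustrative only, not part of the original header: for a hypothetical
 * port argument 0x10003f8, __ia64_mk_io_addr() selects io_space[1]
 * (0x10003f8 >> 24 == 1) and a per-space port of 0x3f8.  If that space is
 * dense, the returned address is mmio_base | 0x3f8; if it is sparse, it is
 * mmio_base | IO_SPACE_SPARSE_ENCODING(0x3f8) == mmio_base | 0xfe3f8.
 */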

/*
 * For the in/out routines, we need to do "mf.a" _after_ doing the I/O access to ensure
 * that the access has completed before executing other I/O accesses.  Since we're doing
 * the accesses through an uncacheable (UC) translation, the CPU will execute them in
 * program order.  However, we still need to tell the compiler not to shuffle them around
 * during optimization, which is why we use "volatile" pointers.
 */

#define inb inb
static inline unsigned int inb(unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);
	unsigned char ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

#define inw inw
static inline unsigned int inw(unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);
	unsigned short ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

#define inl inl
static inline unsigned int inl(unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);
	unsigned int ret;

	ret = *addr;
	__ia64_mf_a();
	return ret;
}

#define outb outb
static inline void outb(unsigned char val, unsigned long port)
{
	volatile unsigned char *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

#define outw outw
static inline void outw(unsigned short val, unsigned long port)
{
	volatile unsigned short *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

#define outl outl
static inline void outl(unsigned int val, unsigned long port)
{
	volatile unsigned int *addr = __ia64_mk_io_addr(port);

	*addr = val;
	__ia64_mf_a();
}

#define insb insb
static inline void insb(unsigned long port, void *dst, unsigned long count)
{
	unsigned char *dp = dst;

	while (count--)
		*dp++ = inb(port);
}

#define insw insw
static inline void insw(unsigned long port, void *dst, unsigned long count)
{
	unsigned short *dp = dst;

	while (count--)
		put_unaligned(inw(port), dp++);
}

#define insl insl
static inline void insl(unsigned long port, void *dst, unsigned long count)
{
	unsigned int *dp = dst;

	while (count--)
		put_unaligned(inl(port), dp++);
}

#define outsb outsb
static inline void outsb(unsigned long port, const void *src,
		unsigned long count)
{
	const unsigned char *sp = src;

	while (count--)
		outb(*sp++, port);
}

#define outsw outsw
static inline void outsw(unsigned long port, const void *src,
		unsigned long count)
{
	const unsigned short *sp = src;

	while (count--)
		outw(get_unaligned(sp++), port);
}

#define outsl outsl
static inline void outsl(unsigned long port, const void *src,
		unsigned long count)
{
	const unsigned int *sp = src;

	while (count--)
		outl(get_unaligned(sp++), port);
}
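
/*
 * Illustrative only, not part of the original header: a minimal sketch of
 * how a driver might use these accessors, assuming a hypothetical device
 * with an 8-bit command register at port base + 0 and a 16-bit data FIFO
 * at port base + 4 (all names, values and offsets here are made up):
 *
 *	u16 buf[64];
 *
 *	outb(CMD_START_XFER, base + 0);		// hypothetical command value
 *	insw(base + 4, buf, 64);		// drain 64 words from the FIFO
 *
 * Each access goes through __ia64_mk_io_addr() above, so "base" must be a
 * port number within one of the spaces described by io_space[].
 */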

# ifdef __KERNEL__

extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_uc(unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
static inline void __iomem * ioremap_cache (unsigned long phys_addr, unsigned long size)
{
	return ioremap(phys_addr, size);
}
#define ioremap ioremap
#define ioremap_cache ioremap_cache
#define ioremap_uc ioremap_uc
#define iounmap iounmap
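
/*
 * Illustrative only, not part of the original header: the usual MMIO
 * pattern with these hooks, for a hypothetical device register block
 * (the physical address and register offset below are made up):
 *
 *	void __iomem *regs = ioremap(0xf8000000, 0x1000);
 *
 *	if (regs) {
 *		u32 status = readl(regs + 0x10);
 *		iounmap(regs);
 *	}
 *
 * The returned cookie lies in region 6 (see the cookie comment above), so
 * ioread*()/iowrite*() can tell it apart from a PIO cookie.
 */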

/*
 * String version of IO memory access ops:
 */
extern void memcpy_fromio(void *dst, const volatile void __iomem *src, long n);
extern void memcpy_toio(volatile void __iomem *dst, const void *src, long n);
extern void memset_io(volatile void __iomem *s, int c, long n);

#define memcpy_fromio memcpy_fromio
#define memcpy_toio memcpy_toio
#define memset_io memset_io
#define xlate_dev_kmem_ptr xlate_dev_kmem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
#include <asm-generic/io.h>
#undef PCI_IOBASE

# endif /* __KERNEL__ */

#endif /* _ASM_IA64_IO_H */