Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) /* SPDX-License-Identifier: GPL-2.0 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) #ifndef _ASM_X86_IO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3) #define _ASM_X86_IO_H
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6)  * This file contains the definitions for the x86 IO instructions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7)  * inb/inw/inl/outb/outw/outl and the "string versions" of the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8)  * (insb/insw/insl/outsb/outsw/outsl). You can also use "pausing"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9)  * versions of the single-IO instructions (inb_p/inw_p/..).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11)  * This file is not meant to be obfuscating: it's just complicated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12)  * to (a) handle it all in a way that makes gcc able to optimize it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13)  * as well as possible and (b) trying to avoid writing the same thing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14)  * over and over again with slight variations and possibly making a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15)  * mistake somewhere.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19)  * Thanks to James van Artsdalen for a better timing-fix than
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20)  * the two short jumps: using outb's to a nonexistent port seems
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21)  * to guarantee better timings even on fast machines.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23)  * On the other hand, I'd like to be sure of a non-existent port:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24)  * I feel a bit unsafe about using 0x80 (should be safe, though)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26)  *		Linus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29)  /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30)   *  Bit simplified and optimized by Jan Hubicka
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31)   *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  32)   *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  33)   *  isa_memset_io, isa_memcpy_fromio, isa_memcpy_toio added,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  34)   *  isa_read[wl] and isa_write[wl] fixed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  35)   *  - Arnaldo Carvalho de Melo <acme@conectiva.com.br>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  36)   */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  37) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  38) #define ARCH_HAS_IOREMAP_WC
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  39) #define ARCH_HAS_IOREMAP_WT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  40) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  41) #include <linux/string.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  42) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  43) #include <asm/page.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  44) #include <asm/early_ioremap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  45) #include <asm/pgtable_types.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  46) 
/*
 * build_mmio_read() - generate a static inline MMIO load accessor.
 * @name:    function name to define (e.g. readb)
 * @size:    instruction size suffix appended to "mov" ("b"/"w"/"l"/"q")
 * @type:    C type of the loaded value
 * @reg:     output register constraint for the result (e.g. "=q", "=r")
 * @barrier: either empty or :"memory", i.e. an optional clobber list that
 *           acts as a compiler barrier against reordering
 *
 * The "m" input operand dereferences @addr (casting away __iomem with
 * __force), so the compiler knows which memory is read.
 */
#define build_mmio_read(name, size, type, reg, barrier) \
static inline type name(const volatile void __iomem *addr) \
{ type ret; asm volatile("mov" size " %1,%0":reg (ret) \
:"m" (*(volatile type __force *)addr) barrier); return ret; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  51) 
/*
 * build_mmio_write() - generate a static inline MMIO store accessor.
 * @name:    function name to define (e.g. writeb)
 * @size:    instruction size suffix appended to "mov" ("b"/"w"/"l"/"q")
 * @type:    C type of the stored value
 * @reg:     input register constraint for @val (e.g. "q", "r")
 * @barrier: either empty or :"memory" (optional compiler barrier)
 *
 * Mirror image of build_mmio_read(): the "m" operand is the store
 * destination, expressed as a dereference so the compiler sees the write.
 */
#define build_mmio_write(name, size, type, reg, barrier) \
static inline void name(type val, volatile void __iomem *addr) \
{ asm volatile("mov" size " %0,%1": :reg (val), \
"m" (*(volatile type __force *)addr) barrier); }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  56) 
/*
 * readb/readw/readl: MMIO loads with a :"memory" clobber, so the compiler
 * may not reorder them against other memory accesses.
 *
 * __readb/__readw/__readl: the same loads without the clobber; they back
 * the _relaxed() and __raw_ variants defined below.
 */
build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
build_mmio_read(readl, "l", unsigned int, "=r", :"memory")

build_mmio_read(__readb, "b", unsigned char, "=q", )
build_mmio_read(__readw, "w", unsigned short, "=r", )
build_mmio_read(__readl, "l", unsigned int, "=r", )

/* Store counterparts: writeX ordered (clobber), __writeX plain. */
build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
build_mmio_write(writew, "w", unsigned short, "r", :"memory")
build_mmio_write(writel, "l", unsigned int, "r", :"memory")

build_mmio_write(__writeb, "b", unsigned char, "q", )
build_mmio_write(__writew, "w", unsigned short, "r", )
build_mmio_write(__writel, "l", unsigned int, "r", )
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  72) 
/*
 * "#define name name" marks each accessor as provided by the arch, so
 * <asm-generic/io.h> (included at the bottom of this file) does not
 * install its generic fallback.  The _relaxed() and __raw_ forms map to
 * the barrier-less __readX/__writeX accessors generated above.
 */
#define readb readb
#define readw readw
#define readl readl
#define readb_relaxed(a) __readb(a)
#define readw_relaxed(a) __readw(a)
#define readl_relaxed(a) __readl(a)
#define __raw_readb __readb
#define __raw_readw __readw
#define __raw_readl __readl

#define writeb writeb
#define writew writew
#define writel writel
#define writeb_relaxed(v, a) __writeb(v, a)
#define writew_relaxed(v, a) __writew(v, a)
#define writel_relaxed(v, a) __writel(v, a)
#define __raw_writeb __writeb
#define __raw_writew __writew
#define __raw_writel __writel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  92) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  93) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  94) 
/*
 * 64-bit MMIO accessors (CONFIG_X86_64 only): readq/writeq are ordered
 * via the :"memory" clobber; __readq/__writeq are the plain forms used
 * for the _relaxed()/__raw_ aliases below.
 */
build_mmio_read(readq, "q", u64, "=r", :"memory")
build_mmio_read(__readq, "q", u64, "=r", )
build_mmio_write(writeq, "q", u64, "r", :"memory")
build_mmio_write(__writeq, "q", u64, "r", )

#define readq_relaxed(a)	__readq(a)
#define writeq_relaxed(v, a)	__writeq(v, a)

#define __raw_readq		__readq
#define __raw_writeq		__writeq

/* Let people know that we have them */
#define readq			readq
#define writeq			writeq
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) #define ARCH_HAS_VALID_PHYS_ADDR_RANGE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) extern int valid_phys_addr_range(phys_addr_t addr, size_t size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117)  *	virt_to_phys	-	map virtual addresses to physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118)  *	@address: address to remap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120)  *	The returned physical address is the physical (CPU) mapping for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121)  *	the memory address given. It is only valid to use this function on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122)  *	addresses directly mapped or allocated via kmalloc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124)  *	This function does not give bus mappings for DMA transfers. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125)  *	almost all conceivable cases a device driver should not be using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)  *	this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) 
static inline phys_addr_t virt_to_phys(volatile void *address)
{
	/* Direct-map translation; see the kernel-doc above for the
	 * restrictions (no DMA/bus addresses, directly-mapped only). */
	return __pa(address);
}
/* Arch-provided; suppresses the asm-generic fallback. */
#define virt_to_phys virt_to_phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136)  *	phys_to_virt	-	map physical address to virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137)  *	@address: address to remap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139)  *	The returned virtual address is a current CPU mapping for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)  *	the memory address given. It is only valid to use this function on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141)  *	addresses that have a kernel mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)  *	This function does not handle bus mappings for DMA transfers. In
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144)  *	almost all conceivable cases a device driver should not be using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145)  *	this function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) 
static inline void *phys_to_virt(phys_addr_t address)
{
	/* Inverse of virt_to_phys(): direct-map lookup via __va(). */
	return __va(address);
}
/* Arch-provided; suppresses the asm-generic fallback. */
#define phys_to_virt phys_to_virt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)  * Change "struct page" to physical address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) #define page_to_phys(page)    ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160)  * ISA I/O bus memory addresses are 1:1 with the physical address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161)  * However, we truncate the address to unsigned int to avoid undesirable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162)  * promotions in legacy drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) static inline unsigned int isa_virt_to_bus(volatile void *address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) 	return (unsigned int)virt_to_phys(address);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168) #define isa_bus_to_virt		phys_to_virt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171)  * However PCI ones are not necessarily 1:1 and therefore these interfaces
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172)  * are forbidden in portable PCI drivers.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174)  * Allow them on x86 for legacy drivers, though.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) #define virt_to_bus virt_to_phys
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) #define bus_to_virt phys_to_virt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180)  * The default ioremap() behavior is non-cached; if you need something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181)  * else, you probably want one of the following.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) #define ioremap_uc ioremap_uc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) #define ioremap_cache ioremap_cache
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size, unsigned long prot_val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) #define ioremap_prot ioremap_prot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) extern void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190) #define ioremap_encrypted ioremap_encrypted
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193)  * ioremap     -   map bus memory into CPU space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)  * @offset:    bus address of the memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195)  * @size:      size of the resource to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197)  * ioremap performs a platform specific sequence of operations to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198)  * make bus memory CPU accessible via the readb/readw/readl/writeb/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199)  * writew/writel functions and the other mmio helpers. The returned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)  * address is not guaranteed to be usable directly as a virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201)  * address.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203)  * If the area you are trying to map is a PCI BAR you should have a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)  * look at pci_iomap().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) void __iomem *ioremap(resource_size_t offset, unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) #define ioremap ioremap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) extern void iounmap(volatile void __iomem *addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) #define iounmap iounmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) extern void set_iounmap_nonlazy(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) #ifdef __KERNEL__
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) void memcpy_fromio(void *, const volatile void __iomem *, size_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) void memcpy_toio(volatile void __iomem *, const void *, size_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) void memset_io(volatile void __iomem *, int, size_t);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220) #define memcpy_fromio memcpy_fromio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) #define memcpy_toio memcpy_toio
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) #define memset_io memset_io
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) #include <asm-generic/iomap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227)  * ISA space is 'always mapped' on a typical x86 system, no need to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228)  * explicitly ioremap() it. The fact that the ISA IO space is mapped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)  * to PAGE_OFFSET is pure coincidence - it does not mean ISA values
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230)  * are physical addresses. The following constant pointer can be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231)  * used as the IO-area pointer (it can be iounmapped as well, so the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232)  * analogy with PCI is quite large):
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) #define __ISA_IO_base ((char __iomem *)(PAGE_OFFSET))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) #endif /* __KERNEL__ */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) extern void native_io_delay(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) extern int io_delay_type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) extern void io_delay_init(void);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) #if defined(CONFIG_PARAVIRT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) #include <asm/paravirt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) 
/*
 * Native I/O-delay fallback; paravirt kernels get slow_down_io() from
 * <asm/paravirt.h> instead (see the #if above).  With REALLY_SLOW_IO
 * defined, four dummy port accesses are issued instead of one.
 */
static inline void slow_down_io(void)
{
	native_io_delay();
#ifdef REALLY_SLOW_IO
	native_io_delay();
	native_io_delay();
	native_io_delay();
#endif
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) #ifdef CONFIG_AMD_MEM_ENCRYPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) #include <linux/jump_label.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262) extern struct static_key_false sev_enable_key;
/*
 * sev_key_active() - fast static-branch test of sev_enable_key.
 * Returns true when the key has been enabled (presumably during SEV
 * guest setup — the enabling site is outside this file).
 */
static inline bool sev_key_active(void)
{
	return static_branch_unlikely(&sev_enable_key);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) #else /* !CONFIG_AMD_MEM_ENCRYPT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) static inline bool sev_key_active(void) { return false; }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) #endif /* CONFIG_AMD_MEM_ENCRYPT */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) 
/*
 * BUILDIO() - instantiate the whole family of port-I/O helpers for one
 * access size:
 *
 *   out##bwl()        - write @value to I/O @port
 *   in##bwl()         - read a value from I/O @port
 *   out##bwl##_p() /
 *   in##bwl##_p()     - "pausing" variants: same access followed by
 *                       slow_down_io()
 *   outs##bwl() /
 *   ins##bwl()        - string variants: transfer @count items between
 *                       memory at @addr and @port
 *
 * The string variants use REP OUTS/INS normally, but fall back to a
 * loop of single OUT/IN instructions when sev_key_active() is true.
 * NOTE(review): presumably REP string I/O cannot be serviced for SEV
 * guests — confirm against the SEV/#VC handling code.
 */
#define BUILDIO(bwl, bw, type)						\
static inline void out##bwl(unsigned type value, int port)		\
{									\
	asm volatile("out" #bwl " %" #bw "0, %w1"			\
		     : : "a"(value), "Nd"(port));			\
}									\
									\
static inline unsigned type in##bwl(int port)				\
{									\
	unsigned type value;						\
	asm volatile("in" #bwl " %w1, %" #bw "0"			\
		     : "=a"(value) : "Nd"(port));			\
	return value;							\
}									\
									\
static inline void out##bwl##_p(unsigned type value, int port)		\
{									\
	out##bwl(value, port);						\
	slow_down_io();							\
}									\
									\
static inline unsigned type in##bwl##_p(int port)			\
{									\
	unsigned type value = in##bwl(port);				\
	slow_down_io();							\
	return value;							\
}									\
									\
static inline void outs##bwl(int port, const void *addr, unsigned long count) \
{									\
	if (sev_key_active()) {						\
		unsigned type *value = (unsigned type *)addr;		\
		while (count) {						\
			out##bwl(*value, port);				\
			value++;					\
			count--;					\
		}							\
	} else {							\
		asm volatile("rep; outs" #bwl				\
			     : "+S"(addr), "+c"(count)			\
			     : "d"(port) : "memory");			\
	}								\
}									\
									\
static inline void ins##bwl(int port, void *addr, unsigned long count)	\
{									\
	if (sev_key_active()) {						\
		unsigned type *value = (unsigned type *)addr;		\
		while (count) {						\
			*value = in##bwl(port);				\
			value++;					\
			count--;					\
		}							\
	} else {							\
		asm volatile("rep; ins" #bwl				\
			     : "+D"(addr), "+c"(count)			\
			     : "d"(port) : "memory");			\
	}								\
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) BUILDIO(b, b, char)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) BUILDIO(w, w, short)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) BUILDIO(l, , int)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) #define inb inb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) #define inw inw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) #define inl inl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) #define inb_p inb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) #define inw_p inw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) #define inl_p inl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) #define insb insb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345) #define insw insw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) #define insl insl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) #define outb outb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) #define outw outw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) #define outl outl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) #define outb_p outb_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) #define outw_p outw_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) #define outl_p outl_p
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) #define outsb outsb
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) #define outsw outsw
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) #define outsl outsl
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) extern void *xlate_dev_mem_ptr(phys_addr_t phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) #define xlate_dev_mem_ptr xlate_dev_mem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) #define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365) 				enum page_cache_mode pcm);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) #define ioremap_wc ioremap_wc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368) extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) #define ioremap_wt ioremap_wt
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) extern bool is_early_ioremap_ptep(pte_t *ptep);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) #define IO_SPACE_LIMIT 0xffff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375) #include <asm-generic/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) #undef PCI_IOBASE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) #ifdef CONFIG_MTRR
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379) extern int __must_check arch_phys_wc_index(int handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) #define arch_phys_wc_index arch_phys_wc_index
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) extern int __must_check arch_phys_wc_add(unsigned long base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) 					 unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) extern void arch_phys_wc_del(int handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) #define arch_phys_wc_add arch_phys_wc_add
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) #ifdef CONFIG_X86_PAT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) extern int arch_io_reserve_memtype_wc(resource_size_t start, resource_size_t size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) extern void arch_io_free_memtype_wc(resource_size_t start, resource_size_t size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) #define arch_io_reserve_memtype_wc arch_io_reserve_memtype_wc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) extern bool arch_memremap_can_ram_remap(resource_size_t offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) 					unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) 					unsigned long flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) #define arch_memremap_can_ram_remap arch_memremap_can_ram_remap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) extern bool phys_mem_access_encrypted(unsigned long phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) 				      unsigned long size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)  * iosubmit_cmds512 - copy data to single MMIO location, in 512-bit units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404)  * @dst: destination, in MMIO space (must be 512-bit aligned)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405)  * @src: source
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406)  * @count: number of 512 bits quantities to submit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408)  * Submit data from kernel space to MMIO space, in units of 512 bits at a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409)  * time.  Order of access is not guaranteed, nor is a memory barrier
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410)  * performed afterwards.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412)  * Warning: Do not use this helper unless your driver has checked that the CPU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413)  * instruction is supported on the platform.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) static inline void iosubmit_cmds512(void __iomem *dst, const void *src,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) 				    size_t count)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) 	const u8 *from = src;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) 	const u8 *end = from + count * 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) 	while (from < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) 		movdir64b(dst, from);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423) 		from += 64;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) #endif /* _ASM_X86_IO_H */