Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010  Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/io_trapped.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>
#include "ioremap.h"

/*
 * On 32-bit SH, we traditionally have the whole physical address space mapped
 * at all times (as MIPS does), so "ioremap()" and "iounmap()" do not need to do
 * anything but place the address in the proper segment.  This is true for P1
 * and P2 addresses, as well as some P3 ones.  However, most of the P3 addresses
 * and newer cores using extended addressing need to map through page tables, so
 * the ioremap() implementation becomes a bit more complicated.
 */
#ifdef CONFIG_29BIT
static void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached access for P1 addresses is done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);

	return NULL;
}
#else
#define __ioremap_29bit(offset, size, prot)		NULL
#endif /* CONFIG_29BIT */
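
/*
 * Illustrative sketch, not part of the original file: in the 29-bit
 * layout, the PnSEGADDR() translation above is pure address arithmetic.
 * The low 29 bits carry the physical address and the top 3 bits select
 * the segment, so no TLB entry or page table is involved.  Assuming the
 * conventional SH segment bases (P1 = 0x80000000 cached, P2 = 0xa0000000
 * uncached; asm/addrspace.h has the authoritative definitions), the
 * uncached P2 case boils down to something like:
 */
#if 0	/* example only, not compiled */
static inline void __iomem *example_p2segaddr(phys_addr_t offset)
{
	/* keep the 29-bit physical address, relocate it into the P2 window */
	return (void __iomem *)(((unsigned long)offset & 0x1fffffff)
				| 0xa0000000);
}
#endif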

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ref
__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
		 pgprot_t pgprot, void *caller)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	void __iomem *mapped;

	mapped = __ioremap_trapped(phys_addr, size);
	if (mapped)
		return mapped;

	mapped = __ioremap_29bit(phys_addr, size, pgprot);
	if (mapped)
		return mapped;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * If we can't yet use the regular approach, go the fixmap route.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, size, pgprot);

	/*
	 * First try to remap through the PMB.
	 * PMB entries are all pre-faulted.
	 */
	mapped = pmb_remap_caller(phys_addr, size, pgprot, caller);
	if (mapped && !IS_ERR(mapped))
		return mapped;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
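
	/*
	 * Worked example (illustrative, assuming 4 KiB pages): for a
	 * request of phys_addr = 0x10000123 and size = 0x100,
	 * last_addr = 0x10000222, so offset becomes 0x123, phys_addr is
	 * rounded down to 0x10000000, and size becomes
	 * PAGE_ALIGN(0x10000223) - 0x10000000 = 0x1000, i.e. a single
	 * page covering the whole requested range.
	 */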

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

	if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
		vunmap((void *)orig_addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap_caller);
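
/*
 * Illustrative usage sketch, not part of the original file:
 * __ioremap_caller() is the backend for the generic ioremap() that
 * drivers call.  A typical (hypothetical) mapping of a device's MMIO
 * window looks like this, with EXAMPLE_MMIO_BASE/EXAMPLE_MMIO_SIZE and
 * the register offsets standing in for real resource values:
 */
#if 0	/* example only, not compiled */
static int example_map_device(void)
{
	void __iomem *regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs + 0x04);	/* poke a (made-up) control register */
	(void)readl(regs + 0x00);	/* read a (made-up) status register */

	iounmap(regs);
	return 0;
}
#endif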

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	return 0;
}
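
/*
 * Illustrative examples, not part of the original file: under
 * CONFIG_29BIT a P1 address such as 0x8c000000 or a P2 address such as
 * 0xa0000000 is reported as non-translatable (it was never backed by a
 * VMA or a PMB entry), while a P3 vmalloc-range address such as
 * 0xc0001000 falls through and returns 0, so iounmap() below will go on
 * to tear down its mapping.
 */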

void iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed(addr) == 0)
		return;

	/*
	 * If the PMB handled it, there's nothing else to do.
	 */
	if (pmb_unmap(addr) == 0)
		return;

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(iounmap);
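
/*
 * Illustrative sketch, not part of the original file: most modern
 * drivers do not pair ioremap()/iounmap() by hand but use the managed
 * (devm_*) helpers, which funnel into the same primitives and unmap
 * automatically when the device is unbound.  A typical (hypothetical)
 * probe path:
 */
#if 0	/* example only, not compiled */
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* regs is unmapped for us when the device is unbound */
	return 0;
}
#endif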