Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

3 Commits   0 Branches   0 Tags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   3)  * (c) Copyright 2006, 2007 Hewlett-Packard Development Company, L.P.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   4)  *	Bjorn Helgaas <bjorn.helgaas@hp.com>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   5)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   6) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   7) #include <linux/compiler.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   8) #include <linux/module.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300   9) #include <linux/efi.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  10) #include <linux/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  11) #include <linux/mm.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  12) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  13) #include <asm/io.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  14) #include <asm/meminit.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  15) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  16) static inline void __iomem *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  17) __ioremap_uc(unsigned long phys_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  18) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  19) 	return (void __iomem *) (__IA64_UNCACHED_OFFSET | phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  20) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  21) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  22) void __iomem *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  23) early_ioremap (unsigned long phys_addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  24) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  25) 	u64 attr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  26) 	attr = kern_mem_attribute(phys_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  27) 	if (attr & EFI_MEMORY_WB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  28) 		return (void __iomem *) phys_to_virt(phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  29) 	return __ioremap_uc(phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  30) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  31) 
/*
 * ioremap - map a physical range for MMIO / device access
 * @phys_addr: physical start address (need not be page aligned)
 * @size: length of the range in bytes
 *
 * Chooses a mapping attribute consistent with how the kernel and EFI
 * already describe the range (see Documentation/ia64/aliasing.rst),
 * preferring the cheap region 6/7 identity mappings and falling back
 * to an explicit vmalloc-area page-table mapping only when needed.
 * Returns NULL if that fallback mapping cannot be set up.
 */
void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset;
	pgprot_t prot;
	u64 attr;
	unsigned long gran_base, gran_size;
	unsigned long page_base;

	/*
	 * For things in kern_memmap, we must use the same attribute
	 * as the rest of the kernel.  For more details, see
	 * Documentation/ia64/aliasing.rst.
	 */
	attr = kern_mem_attribute(phys_addr, size);
	if (attr & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);
	else if (attr & EFI_MEMORY_UC)
		return __ioremap_uc(phys_addr);

	/*
	 * Some chipsets don't support UC access to memory.  If
	 * WB is supported for the whole granule, we prefer that.
	 */
	gran_base = GRANULEROUNDDOWN(phys_addr);
	gran_size = GRANULEROUNDUP(phys_addr + size) - gran_base;
	if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * WB is not supported for the whole granule, so we can't use
	 * the region 7 identity mapping.  If we can safely cover the
	 * area with kernel page table mappings, we can use those
	 * instead.
	 */
	page_base = phys_addr & PAGE_MASK;
	/* Round the request out to whole pages before re-checking EFI. */
	size = PAGE_ALIGN(phys_addr + size) - page_base;
	if (efi_mem_attribute(page_base, size) & EFI_MEMORY_WB) {
		prot = PAGE_KERNEL;

		/*
		 * Mappings have to be page-aligned
		 */
		offset = phys_addr & ~PAGE_MASK;
		phys_addr &= PAGE_MASK;

		/*
		 * Ok, go for it..
		 */
		area = get_vm_area(size, VM_IOREMAP);
		if (!area)
			return NULL;

		area->phys_addr = phys_addr;
		addr = (void __iomem *) area->addr;
		if (ioremap_page_range((unsigned long) addr,
				(unsigned long) addr + size, phys_addr, prot)) {
			/* Page-table setup failed: release the vm area. */
			vunmap((void __force *) addr);
			return NULL;
		}

		/* Re-apply the sub-page offset stripped off above. */
		return (void __iomem *) (offset + (char __iomem *)addr);
	}

	/* No WB coverage at any granularity: fall back to uncached. */
	return __ioremap_uc(phys_addr);
}
EXPORT_SYMBOL(ioremap);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) void __iomem *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) ioremap_uc(unsigned long phys_addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) 	if (kern_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) 	return __ioremap_uc(phys_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) EXPORT_SYMBOL(ioremap_uc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) 
/*
 * Counterpart of early_ioremap().  Intentionally a no-op:
 * early_ioremap() only returns identity-mapped (region 6/7) addresses
 * and never allocates anything, so there is nothing to tear down.
 */
void
early_iounmap (volatile void __iomem *addr, unsigned long size)
{
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) void
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) iounmap (volatile void __iomem *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) 	if (REGION_NUMBER(addr) == RGN_GATE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) 		vunmap((void *) ((unsigned long) addr & PAGE_MASK));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) EXPORT_SYMBOL(iounmap);