Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

File: arch/x86/mm/ioremap.c (per git blame, every line last touched by commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/mem_encrypt.h>
#include <linux/efi.h>
#include <linux/pgtable.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
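
/*
 * Illustrative sketch (not part of the original file): a caller that has
 * established a non-WB mapping of a range that also lives in the kernel's
 * direct map could keep the alias coherent roughly like this. The address
 * and size are hypothetical, and ioremap_change_attr() is normally invoked
 * via memtype_kernel_map_sync() rather than directly.
 */
#if 0
static int example_sync_direct_map_alias(void)
{
	unsigned long vaddr = (unsigned long)__va(0x80000000UL);	/* hypothetical */
	unsigned long size  = 2 * PAGE_SIZE;				/* hypothetical */

	/* Flip the direct-map alias to write-combining... */
	if (ioremap_change_attr(vaddr, size, _PAGE_CACHE_MODE_WC))
		return -EINVAL;
	/* ...and restore write-back when done. */
	return ioremap_change_attr(vaddr, size, _PAGE_CACHE_MODE_WB);
}
#endif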

/* Does the range (or a subset of it) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}
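
/*
 * Worked example for the rounding above (illustrative): with 4 KiB pages,
 * a resource spanning 0x1200-0x3fff gives start_pfn = 2 (0x1200 rounded up
 * to 0x2000) and stop_pfn = 4 (0x4000 >> PAGE_SHIFT), so only the fully
 * covered pages 2 and 3 are checked for usable RAM.
 */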

/*
 * In an SEV guest, NONE and RESERVED should not be mapped encrypted because
 * in such guests the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!sev_active())
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!sev_active())
		return;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
			       (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource whose descriptor is not IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}
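
/*
 * Note (editorial): walk_mem_res() calls __ioremap_collect_map_flags() for
 * each intersecting resource and stops early once the callback returns
 * nonzero, i.e. once both IORES_MAP_SYSTEM_RAM and IORES_MAP_ENCRYPTED
 * have been collected and further walking cannot change the result.
 */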

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages, toward 4KB,
 * when a mapping range is covered by non-WB-type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * OK, go for it.
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}
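
/*
 * Worked example for the alignment handling above (illustrative): a call
 * such as ioremap(0xfed00004, 8) yields offset = 0x4, maps the single page
 * at 0xfed00000 (size becomes 0x1000), and returns the vm area address
 * plus 0x4, so the caller sees exactly the requested bytes while the
 * mapping itself stays page-aligned.
 */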

/**
 * ioremap     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);
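
/*
 * Usage sketch (not part of the original file): a typical driver maps a
 * device BAR once, uses the MMIO accessors on it, and releases it with
 * iounmap(). EXAMPLE_BAR_* and the register offsets are hypothetical.
 */
#if 0
#define EXAMPLE_BAR_PHYS	0xfed40000UL	/* hypothetical bus address */
#define EXAMPLE_BAR_SIZE	0x1000UL	/* hypothetical BAR size */

static int example_probe(void)
{
	void __iomem *regs;
	u32 id;

	regs = ioremap(EXAMPLE_BAR_PHYS, EXAMPLE_BAR_SIZE);
	if (!regs)
		return -ENOMEM;

	id = readl(regs);		/* hypothetical ID register at offset 0 */
	writel(0x1, regs + 0x04);	/* hypothetical enable register */

	iounmap(regs);
	return id ? 0 : -ENODEV;
}
#endif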

/**
 * ioremap_uc     -   map bus memory into CPU space as strongly uncacheable
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncacheable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC.  This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
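
/*
 * Usage sketch (not part of the original file): write-combining suits
 * framebuffer-style apertures dominated by streaming writes. The aperture
 * address and size below are hypothetical.
 */
#if 0
static void __iomem *example_map_framebuffer(void)
{
	void __iomem *fb = ioremap_wc(0xd0000000UL, 8UL << 20);	/* hypothetical */

	if (fb)
		memset_io(fb, 0, 8UL << 20);	/* fast streaming clear */
	return fb;
}
#endif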

/**
 * ioremap_wt	-	map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
					__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
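
/*
 * Note (editorial): because iounmap() masks the address with PAGE_MASK
 * before the vm area lookup, callers may pass back exactly the pointer
 * returned by ioremap() even for non-page-aligned requests; a pointer
 * ending in ...004 is reduced to its page base first.
 */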

int __init arch_ioremap_p4d_supported(void)
{
	return 0;
}

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return boot_cpu_has(X86_FEATURE_GBPAGES);
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return boot_cpu_has(X86_FEATURE_PSE);
}

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}
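
/*
 * Usage sketch (not part of the original file): /dev/mem-style readers pair
 * xlate_dev_mem_ptr() with unxlate_dev_mem_ptr() around a temporary copy.
 * The helper below is hypothetical.
 */
#if 0
static int example_peek_phys(phys_addr_t phys, u32 *out)
{
	void *p = xlate_dev_mem_ptr(phys);

	if (!p)
		return -EFAULT;
	*out = *(u32 *)p;	/* copy out while the page is mapped */
	unxlate_dev_mem_ptr(phys, p);
	return 0;
}
#endif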

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted.  If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (sev_active())
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;
	default:
		break;
	}

	return false;
}
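
/*
 * Illustrative outcomes of the checks above: a range intersecting an
 * IORES_DESC_PERSISTENT_MEMORY region always returns true; an
 * E820_TYPE_RESERVED range returns true under SME but false under SEV,
 * where BIOS/UEFI also ran encrypted; ordinary E820_TYPE_RAM hits the
 * default case and returns false.
 */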

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}
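
/*
 * Worked example for the 64-bit address assembly above (illustrative):
 * with efi_memmap_hi = 0x1 and efi_memmap = 0x7fe00000, paddr becomes
 * (0x1 << 32) | 0x7fe00000 = 0x17fe00000.
 */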

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to memremap setup_data entry\n");
			return false;
		}

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				pr_warn("failed to memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}
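
/*
 * Illustrative chain layout for the walk above (all addresses hypothetical):
 *
 *   boot_params.hdr.setup_data -> 0x7f001000: { next = 0x7f002000, len = 32 }
 *                                 0x7f002000: { next = 0,          len = 16 }
 *
 * A phys_addr of 0x7f001010 satisfies paddr < phys_addr < paddr + len for
 * the first node, so the function returns true without visiting the second.
 */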

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			pr_warn("failed to early memremap setup_data entry\n");
			return false;
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			early_memunmap(data, sizeof(*data));
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				pr_warn("failed to early memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		early_memunmap(data, size);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!mem_encrypt_active())
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (sme_active()) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}
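
/*
 * Usage sketch (not part of the original file): the flags tested above come
 * straight from memremap() callers. For example, a caller that needs a
 * decrypted view of a hypothetical firmware-shared page could pass
 * MEMREMAP_DEC, which makes this hook return false so that memremap() does
 * not simply reuse the (encrypted) direct mapping of RAM.
 */
#if 0
static void *example_map_shared_page(phys_addr_t phys)	/* phys is hypothetical */
{
	return memremap(phys, PAGE_SIZE, MEMREMAP_WB | MEMREMAP_DEC);
}
#endif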
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779)  * Architecture override of the __weak function that adjusts the protection
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780)  * attributes used when remapping memory. By default, early_memremap() maps
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781)  * the data encrypted; choose decrypted protection attributes instead when
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782)  * the range must be accessible in the clear.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784) pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) 					     unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) 					     pgprot_t prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 788) 	bool encrypted_prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 790) 	if (!mem_encrypt_active())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 791) 		return prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 793) 	encrypted_prot = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) 	if (sme_active()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796) 		if (early_memremap_is_setup_data(phys_addr, size) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) 		    memremap_is_efi_data(phys_addr, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) 			encrypted_prot = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) 	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) 		encrypted_prot = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) 	return encrypted_prot ? pgprot_encrypted(prot)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) 			      : pgprot_decrypted(prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) }
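
/*
 * Sketch (illustrative): pgprot_encrypted()/pgprot_decrypted() merely set or
 * clear the SME C-bit (sme_me_mask) in the protection value:
 *
 *	pgprot_t enc = pgprot_encrypted(PAGE_KERNEL);	// __sme_set()
 *	pgprot_t dec = pgprot_decrypted(PAGE_KERNEL);	// __sme_clr()
 *
 * so the function above only ever flips that one bit of 'prot'.
 */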
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) 	return arch_memremap_can_ram_remap(phys_addr, size, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) }
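
/*
 * I.e. a physical range reads back encrypted exactly when a plain
 * memremap() of it (no MEMREMAP_ENC/MEMREMAP_DEC flag) would produce an
 * encrypted mapping.
 */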
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) #ifdef CONFIG_AMD_MEM_ENCRYPT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* Remap memory with encryption */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) void __init *early_memremap_encrypted(resource_size_t phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) 				      unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) 	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822)  * Remap memory with encryption and write protection - must not be called
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823)  * before pat_init() has run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) 					 unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) 	if (!x86_has_pat_wp())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) 	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) /* Remap memory without encryption */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) void __init *early_memremap_decrypted(resource_size_t phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) 				      unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) 	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841)  * Remap memory without encryption but with write protection - must not be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)  * called before pat_init() has run
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844) void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) 					 unsigned long size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) 	if (!x86_has_pat_wp())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) 	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) #endif	/* CONFIG_AMD_MEM_ENCRYPT */
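
/*
 * Illustrative only: the helpers above mirror early_memremap() with the
 * encryption attribute pinned.  A hypothetical early boot user copying a
 * blob that firmware left decrypted in RAM might do:
 *
 *	void *src = early_memremap_decrypted(paddr, len);
 *
 *	if (src) {
 *		memcpy(dst, src, len);
 *		early_memunmap(src, len);
 *	}
 */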
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) 
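/*
 * A single statically allocated page of ptes backs the early boot-time
 * fixmap slots (FIX_BTMAP_END..FIX_BTMAP_BEGIN) that early_ioremap_init()
 * wires up below.
 */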
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) 	/* Don't assume we're using swapper_pg_dir at this point */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) 	pgd_t *base = __va(read_cr3_pa());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) 	pgd_t *pgd = &base[pgd_index(addr)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) 	p4d_t *p4d = p4d_offset(pgd, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) 	pud_t *pud = pud_offset(p4d, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) 	pmd_t *pmd = pmd_offset(pud, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) 	return pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) }
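
/*
 * Note (illustrative): with 4-level paging the p4d level is folded, so
 * p4d_offset() above simply returns its pgd argument cast to p4d_t *; the
 * walk is equally cheap regardless of CONFIG_PGTABLE_LEVELS.
 */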
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) static inline pte_t * __init early_ioremap_pte(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) 	return &bm_pte[pte_index(addr)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) bool __init is_early_ioremap_ptep(pte_t *ptep)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) 	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) void __init early_ioremap_init(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) 	pmd_t *pmd;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) #ifdef CONFIG_X86_64
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) 	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) 	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) 	early_ioremap_setup();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) 	memset(bm_pte, 0, sizeof(bm_pte));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) 	pmd_populate_kernel(&init_mm, pmd, bm_pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) 	 * The boot-ioremap range spans multiple pmds, for which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) 	 * we are not prepared:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) #define __FIXADDR_TOP (-PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) 	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) 		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) #undef __FIXADDR_TOP
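
	/*
	 * __FIXADDR_TOP is redefined above because on 32-bit the real
	 * __FIXADDR_TOP is a variable; substituting a constant lets
	 * __fix_to_virt() expand to a compile-time constant expression
	 * that BUILD_BUG_ON() can evaluate.
	 */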
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) 	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) 		WARN_ON(1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) 		printk(KERN_WARNING "pmd %p != %p\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) 		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905) 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) 			fix_to_virt(FIX_BTMAP_BEGIN));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) 		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) 			fix_to_virt(FIX_BTMAP_END));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) 		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) 		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) 		       FIX_BTMAP_BEGIN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) }
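
/*
 * Sketch (illustrative): with the pmd populated above, early boot code can
 * create temporary mappings long before paging_init(), e.g.:
 *
 *	void __iomem *regs = early_ioremap(phys, PAGE_SIZE);
 *
 *	// ... access the device ...
 *	early_iounmap(regs, PAGE_SIZE);
 *
 * Each live mapping consumes slots between FIX_BTMAP_END and
 * FIX_BTMAP_BEGIN, all backed by the single bm_pte[] page.
 */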
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) void __init __early_set_fixmap(enum fixed_addresses idx,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) 			       phys_addr_t phys, pgprot_t flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) 	unsigned long addr = __fix_to_virt(idx);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) 	pte_t *pte;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) 	if (idx >= __end_of_fixed_addresses) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) 		BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) 	pte = early_ioremap_pte(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) 	/* Sanitize 'flags' against any unsupported bits: */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) 	pgprot_val(flags) &= __supported_pte_mask;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) 	if (pgprot_val(flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) 		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) 	else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) 		pte_clear(&init_mm, addr, pte);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) 	flush_tlb_one_kernel(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) }
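
/*
 * Illustrative: an empty pgprot takes the pte_clear() path above and tears
 * the mapping down again, which is how the generic early_clear_fixmap()
 * ends up here:
 *
 *	__early_set_fixmap(idx, phys, FIXMAP_PAGE_IO);	// map
 *	__early_set_fixmap(idx, 0, __pgprot(0));	// unmap
 */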