Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/init.c
 *
 *  Copyright (C) 1995-2005 Russell King
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>
#include <linux/stop_machine.h>
#include <linux/swiotlb.h>
#include <linux/rk-dma-heap.h>

#include <asm/cp15.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/memory.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/set_memory.h>
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/ptdump.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

#ifdef CONFIG_CPU_CP15_MMU
unsigned long __init __clear_cr(unsigned long mask)
{
	cr_alignment = cr_alignment & ~mask;
	return cr_alignment;
}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
static int __init parse_tag_initrd(const struct tag *tag)
{
	pr_warn("ATAG_INITRD is deprecated; "
		"please update your bootloader.\n");
	phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD, parse_tag_initrd);

static int __init parse_tag_initrd2(const struct tag *tag)
{
	phys_initrd_start = tag->u.initrd.start;
	phys_initrd_size = tag->u.initrd.size;
	return 0;
}

__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#endif

static void __init find_limits(unsigned long *min, unsigned long *max_low,
			       unsigned long *max_high)
{
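	/*
	 * Editorial note: PFN_DOWN()/PFN_UP() convert byte addresses to
	 * page frame numbers, rounding down/up respectively; e.g. with
	 * 4 KiB pages, PFN_DOWN(0x2fff) == 2 and PFN_UP(0x2001) == 3.
	 */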
	*max_low = PFN_DOWN(memblock_get_current_limit());
	*min = PFN_UP(memblock_start_of_DRAM());
	*max_high = PFN_DOWN(memblock_end_of_DRAM());
}

#ifdef CONFIG_ZONE_DMA

phys_addr_t arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);

/*
 * The DMA mask corresponding to the maximum bus address allocatable
 * using GFP_DMA.  The default here places no restriction on DMA
 * allocations.  This must be the smallest DMA mask in the system,
 * so a successful GFP_DMA allocation will always satisfy this.
 */
phys_addr_t arm_dma_limit;
unsigned long arm_dma_pfn_limit;
#endif

void __init setup_dma_zone(const struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
	arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
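	/*
	 * Illustrative example (hypothetical values): PHYS_OFFSET at
	 * 0x80000000 with a 256 MiB DMA zone gives arm_dma_limit
	 * 0x8fffffff and, with 4 KiB pages, arm_dma_pfn_limit 0x8ffff.
	 */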
#endif
}

static void __init zone_sizes_init(unsigned long min, unsigned long max_low,
	unsigned long max_high)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };
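	/*
	 * Each entry is the exclusive upper PFN of that zone;
	 * free_area_init() derives the zone spans from these.
	 */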

#ifdef CONFIG_ZONE_DMA
	max_zone_pfn[ZONE_DMA] = min(arm_dma_pfn_limit, max_low);
#endif
	max_zone_pfn[ZONE_NORMAL] = max_low;
#ifdef CONFIG_HIGHMEM
	max_zone_pfn[ZONE_HIGHMEM] = max_high;
#endif
	free_area_init(max_zone_pfn);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = __pfn_to_phys(pfn);
	unsigned long pageblock_size = PAGE_SIZE * pageblock_nr_pages;

	if (__phys_to_pfn(addr) != pfn)
		return 0;

	/*
	 * If the address is less than pageblock_size bytes away from a
	 * present memory chunk, there will still be a memory map entry
	 * for it, because we round the freed memory map to pageblock
	 * boundaries.
	 */
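	/*
	 * e.g. (illustrative, assuming 4 KiB pages and 512-page
	 * pageblocks): a pfn up to 2 MiB past the end of a present
	 * region is still backed by the memory map and reports valid.
	 */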
	if (memblock_overlaps_region(&memblock.memory,
				     ALIGN_DOWN(addr, pageblock_size),
				     pageblock_size))
		return 1;

	return 0;
}
EXPORT_SYMBOL(pfn_valid);
#endif

static bool arm_memblock_steal_permitted = true;

phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
	phys_addr_t phys;

	BUG_ON(!arm_memblock_steal_permitted);

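	/*
	 * Allocate a suitable free range, then free and remove it from
	 * memblock entirely: the stolen region disappears from the
	 * kernel's view of memory and is left for the caller to manage.
	 */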
	phys = memblock_phys_alloc(size, align);
	if (!phys)
		panic("Failed to steal %pa bytes at %pS\n",
		      &size, (void *)_RET_IP_);

	memblock_free(phys, size);
	memblock_remove(phys, size);

	return phys;
}

static void __init arm_initrd_init(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	phys_addr_t start;
	unsigned long size;

	initrd_start = initrd_end = 0;

	if (!phys_initrd_size)
		return;

	/*
	 * Round the memory region to page boundaries as per free_initrd_mem()
	 * This allows us to detect whether the pages overlapping the initrd
	 * are in use, but more importantly, reserves the entire set of pages
	 * as we don't want these pages allocated for other purposes.
	 */
	start = round_down(phys_initrd_start, PAGE_SIZE);
	size = phys_initrd_size + (phys_initrd_start - start);
	size = round_up(size, PAGE_SIZE);
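	/*
	 * e.g. (hypothetical values, 4 KiB pages): phys_initrd_start
	 * 0x10200800 with size 0x1000 yields start 0x10200000 and a
	 * reserved size of 0x2000, i.e. both straddled pages.
	 */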

	if (!memblock_is_region_memory(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	if (memblock_is_region_reserved(start, size)) {
		pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region - disabling initrd\n",
		       (u64)start, size);
		return;
	}

	memblock_reserve(start, size);

	/* Now convert initrd to virtual addresses */
	initrd_start = __phys_to_virt(phys_initrd_start);
	initrd_end = initrd_start + phys_initrd_size;
#endif
}

#ifdef CONFIG_CPU_ICACHE_MISMATCH_WORKAROUND
void check_cpu_icache_size(int cpuid)
{
	u32 size, ctr;

	asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (ctr));

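	/*
	 * CTR.IminLine (bits [3:0]) is log2 of the smallest I-cache
	 * line size in words; e.g. a value of 4 decodes to 16 words,
	 * i.e. a 64-byte line.
	 */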
	size = 1 << ((ctr & 0xf) + 2);
	if (cpuid != 0 && icache_size != size)
		pr_info("CPU%u: detected I-Cache line size mismatch, workaround enabled\n",
			cpuid);
	if (icache_size > size)
		icache_size = size;
}
#endif

void __init arm_memblock_init(const struct machine_desc *mdesc)
{
	/* Register the kernel text, kernel data and initrd with memblock. */
	memblock_reserve(__pa(KERNEL_START), KERNEL_END - KERNEL_START);

	arm_initrd_init();

	arm_mm_memblock_reserve();

	/* reserve any platform specific memblock areas */
	if (mdesc->reserve)
		mdesc->reserve();

	early_init_fdt_scan_reserved_mem();

	/* reserve memory for DMA contiguous allocations */
	dma_contiguous_reserve(arm_dma_limit);
	rk_dma_heap_cma_setup();

	arm_memblock_steal_permitted = false;
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	memblock_allow_resize();

	find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);

	early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
		      (phys_addr_t)max_low_pfn << PAGE_SHIFT);

	/*
	 * sparse_init() tries to allocate memory from memblock, so must be
	 * done after the fixed reservations
	 */
	sparse_init();

	/*
	 * Now free the memory - free_area_init needs
	 * the sparse mem_map arrays initialized by sparse_init()
	 * for memmap_init_zone(), otherwise all PFNs are invalid.
	 */
	zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}

/*
 * Poison init memory with an undefined instruction (ARM) or a branch to an
 * undefined instruction (Thumb).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	u32 *p = (u32 *)s;
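	/*
	 * count is assumed to be a multiple of 4 here; the callers in
	 * this file pass page- or word-aligned ranges.
	 */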
	for (; count != 0; count -= 4)
		*p++ = 0xe7fddef0;
}

static inline void __init
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	phys_addr_t pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and
	 * round start upwards and end downwards.
	 */
	pg = PAGE_ALIGN(__pa(start_pg));
	pgend = __pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these,
	 * free the section of the memmap array.
	 */
	if (pg < pgend)
		memblock_free_early(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	/*
	 * This relies on each bank being in address order.
	 * The banks are sorted previously in bootmem_init().
	 */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist
		 * due to SPARSEMEM sections which aren't present.
		 */
		start = min(start,
				 ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * Align down here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		start = round_down(start, pageblock_nr_pages);

		/*
		 * If we had a previous bank, and there is a space
		 * between the current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since many operations in VM subsystem
		 * presume that there are no holes in the memory map inside
		 * a pageblock
		 */
		prev_end = ALIGN(end, pageblock_nr_pages);
	}

#ifdef CONFIG_SPARSEMEM
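	/*
	 * With SPARSEMEM the memory map exists in whole sections: free
	 * the unused tail of the last present section, from the
	 * pageblock-aligned end of the last bank up to the next section
	 * boundary.
	 */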
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) {
		prev_end = ALIGN(end, pageblock_nr_pages);
		free_memmap(prev_end,
			    ALIGN(prev_end, PAGES_PER_SECTION));
	}
#endif
}

static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
	unsigned long max_low = max_low_pfn;
	phys_addr_t range_start, range_end;
	u64 i;

	/* set highmem page free */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&range_start, &range_end, NULL) {
		unsigned long start = PFN_UP(range_start);
		unsigned long end = PFN_DOWN(range_end);

		/* Ignore complete lowmem entries */
		if (end <= max_low)
			continue;

		/* Truncate partial highmem entries */
		if (start < max_low)
			start = max_low;

		for (; start < end; start++)
			free_highmem_page(pfn_to_page(start));
	}
#endif
}

/*
 * mem_init() marks the free areas in the mem_map and tells us how much
 * memory is free.  This is done after various parts of the system have
 * claimed their memory after the kernel image.
 */
void __init mem_init(void)
{
#ifdef CONFIG_ARM_LPAE
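	/*
	 * With LPAE, RAM can live above the 32-bit boundary; enable
	 * swiotlb bounce buffering when memory extends past the DMA
	 * limit (or when swiotlb is forced on the command line).
	 */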
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > arm_dma_pfn_limit)
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;
#endif

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
	memblock_free_all();

#ifdef CONFIG_SA1111
	/* now that our DMA memory is actually so designated, we can free it */
	free_reserved_area(__va(PHYS_OFFSET), swapper_pg_dir, -1, NULL);
#endif

	free_highpages();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can
	 * be detected at build time already.
	 */
#ifdef CONFIG_MMU
	BUILD_BUG_ON(TASK_SIZE				> MODULES_VADDR);
	BUG_ON(TASK_SIZE				> MODULES_VADDR);
#endif

#ifdef CONFIG_HIGHMEM
	BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
	BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE	> PAGE_OFFSET);
#endif
}

#ifdef CONFIG_STRICT_KERNEL_RWX
struct section_perm {
	const char *name;
	unsigned long start;
	unsigned long end;
	pmdval_t mask;
	pmdval_t prot;
	pmdval_t clear;
};

/* First section-aligned location at or after __start_rodata. */
extern char __start_rodata_section_aligned[];

static struct section_perm nx_perms[] = {
	/* Make page tables, etc. before _stext RW (set NX). */
	{
		.name	= "pre-text NX",
		.start	= PAGE_OFFSET,
		.end	= (unsigned long)_stext,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make init RW (set NX). */
	{
		.name	= "init NX",
		.start	= (unsigned long)__init_begin,
		.end	= (unsigned long)_sdata,
		.mask	= ~PMD_SECT_XN,
		.prot	= PMD_SECT_XN,
	},
	/* Make rodata NX (set RO in ro_perms below). */
	{
		.name	= "rodata NX",
		.start  = (unsigned long)__start_rodata_section_aligned,
		.end    = (unsigned long)__init_begin,
		.mask   = ~PMD_SECT_XN,
		.prot   = PMD_SECT_XN,
	},
};

static struct section_perm ro_perms[] = {
	/* Make kernel code and rodata RX (set RO). */
	{
		.name	= "text/rodata RO",
		.start  = (unsigned long)_stext,
		.end    = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
		.mask   = ~(L_PMD_SECT_RDONLY | PMD_SECT_AP2),
		.prot   = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
#else
		.mask   = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
		.prot   = PMD_SECT_APX | PMD_SECT_AP_WRITE,
		.clear  = PMD_SECT_AP_WRITE,
#endif
	},
};

/*
 * Updates section permissions only for the current mm (sections are
 * copied into each mm). During startup, this is the init_mm. It is only
 * safe to call with preemption disabled, as under stop_machine().
 */
static inline void section_update(unsigned long addr, pmdval_t mask,
				  pmdval_t prot, struct mm_struct *mm)
{
	pmd_t *pmd;

	pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, addr), addr), addr), addr);

#ifdef CONFIG_ARM_LPAE
	pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#else
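	/*
	 * Without LPAE, a Linux pmd covers 2 MiB and holds a pair of
	 * 1 MiB hardware section descriptors; update the half of the
	 * pair that maps addr.
	 */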
	if (addr & SECTION_SIZE)
		pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
	else
		pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
#endif
	flush_pmd_entry(pmd);
	local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
}

/* Make sure extended page tables are in use. */
static inline bool arch_has_strict_perms(void)
{
	if (cpu_architecture() < CPU_ARCH_ARMv6)
		return false;

	return !!(get_cr() & CR_XP);
}

static void set_section_perms(struct section_perm *perms, int n, bool set,
			      struct mm_struct *mm)
{
	size_t i;
	unsigned long addr;

	if (!arch_has_strict_perms())
		return;

	for (i = 0; i < n; i++) {
		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
			pr_err("BUG: %s section %lx-%lx not aligned to %lx\n",
				perms[i].name, perms[i].start, perms[i].end,
				SECTION_SIZE);
			continue;
		}

		for (addr = perms[i].start;
		     addr < perms[i].end;
		     addr += SECTION_SIZE)
			section_update(addr, perms[i].mask,
				set ? perms[i].prot : perms[i].clear, mm);
	}
}

/*
 * update_sections_early() is intended to be called only through the
 * stop_machine framework, executed by a single CPU while all other CPUs
 * spin and wait, so no locking is required in this function.
 */
static void update_sections_early(struct section_perm perms[], int n)
{
	struct task_struct *t, *s;

	for_each_process(t) {
		if (t->flags & PF_KTHREAD)
			continue;
		for_each_thread(t, s)
			if (s->mm)
				set_section_perms(perms, n, true, s->mm);
	}
	set_section_perms(perms, n, true, current->active_mm);
	set_section_perms(perms, n, true, &init_mm);
}

static int __fix_kernmem_perms(void *unused)
{
	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
	return 0;
}

static void fix_kernmem_perms(void)
{
	stop_machine(__fix_kernmem_perms, NULL, NULL);
}

static int __mark_rodata_ro(void *unused)
{
	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
	return 0;
}

static int kernel_set_to_readonly __read_mostly;

void mark_rodata_ro(void)
{
	kernel_set_to_readonly = 1;
	stop_machine(__mark_rodata_ro, NULL, NULL);
	debug_checkwx();
}

void set_kernel_text_rw(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
				current->active_mm);
}

void set_kernel_text_ro(void)
{
	if (!kernel_set_to_readonly)
		return;

	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
				current->active_mm);
}

#else
static inline void fix_kernmem_perms(void) { }
#endif /* CONFIG_STRICT_KERNEL_RWX */

void free_initmem(void)
{
	fix_kernmem_perms();

	poison_init_mem(__init_begin, __init_end - __init_begin);
	if (!machine_is_integrator() && !machine_is_cintegrator())
		free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (start == initrd_start)
		start = round_down(start, PAGE_SIZE);
	if (end == initrd_end)
		end = round_up(end, PAGE_SIZE);

	poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
#endif