Orange Pi 5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

arch/sh/mm/init.c (blame: commit 8f3ce5b39, kx, 2023-10-28 12:00:06 +0300)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/sh/mm/init.c
 *
 *  Copyright (C) 1999  Niibe Yutaka
 *  Copyright (C) 2002 - 2011  Paul Mundt
 *
 *  Based on linux/arch/i386/mm/init.c:
 *   Copyright (C) 1995  Linus Torvalds
 */
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pagemap.h>
#include <linux/percpu.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <asm/mmu_context.h>
#include <asm/mmzone.h>
#include <asm/kexec.h>
#include <asm/tlb.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/cache.h>
#include <asm/pgalloc.h>
#include <linux/sizes.h>
#include "ioremap.h"

pgd_t swapper_pg_dir[PTRS_PER_PGD];

void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}

void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}

#ifdef CONFIG_MMU
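/*
 * Walk the kernel page tables (swapper_pg_dir) for @addr, allocating the
 * intermediate p4d/pud/pmd levels on demand, and return a pointer to the
 * PTE slot. Returns NULL (after logging the offending entry) if the pgd
 * entry is absent or an intermediate allocation fails.
 */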
static pte_t *__get_pte_phys(unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		pgd_ERROR(*pgd);
		return NULL;
	}

	p4d = p4d_alloc(NULL, pgd, addr);
	if (unlikely(!p4d)) {
		p4d_ERROR(*p4d);
		return NULL;
	}

	pud = pud_alloc(NULL, p4d, addr);
	if (unlikely(!pud)) {
		pud_ERROR(*pud);
		return NULL;
	}

	pmd = pmd_alloc(NULL, pud, addr);
	if (unlikely(!pmd)) {
		pmd_ERROR(*pmd);
		return NULL;
	}

	return pte_offset_kernel(pmd, addr);
}

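/*
 * Install (set_pte_phys) or tear down (clear_pte_phys) a single kernel
 * mapping at @addr. The PTE is written directly, the local TLB entry for
 * the current ASID is invalidated, and entries marked _PAGE_WIRED are
 * pinned into (or released from) the TLB so they cannot be evicted.
 */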
static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);
	if (!pte_none(*pte)) {
		pte_ERROR(*pte);
		return;
	}

	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
	local_flush_tlb_one(get_asid(), addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_wire_entry(NULL, addr, *pte);
}

static void clear_pte_phys(unsigned long addr, pgprot_t prot)
{
	pte_t *pte;

	pte = __get_pte_phys(addr);

	if (pgprot_val(prot) & _PAGE_WIRED)
		tlb_unwire_entry();

	set_pte(pte, pfn_pte(0, __pgprot(0)));
	local_flush_tlb_one(get_asid(), addr);
}

void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	set_pte_phys(address, phys, prot);
}

void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	clear_pte_phys(address, prot);
}

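/*
 * Boot-time page-table constructors: if the next level is missing, grab a
 * page from memblock, hook it into the parent entry and return the first
 * entry of the new table. memblock_alloc() returns zeroed memory, so no
 * explicit clearing is needed.
 */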
static pmd_t * __init one_md_table_init(pud_t *pud)
{
	if (pud_none(*pud)) {
		pmd_t *pmd;

		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pud_populate(&init_mm, pud, pmd);
		BUG_ON(pmd != pmd_offset(pud, 0));
	}

	return pmd_offset(pud, 0);
}

static pte_t * __init one_page_table_init(pmd_t *pmd)
{
	if (pmd_none(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);
		pmd_populate_kernel(&init_mm, pmd, pte);
		BUG_ON(pte != pte_offset_kernel(pmd, 0));
	}

	return pte_offset_kernel(pmd, 0);
}

static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}

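/*
 * Pre-populate the page tables covering [start, end) in PMD_SIZE steps so
 * that later fixmap updates via __set_fixmap() only have to write PTEs and
 * never allocate. Only the table pages are created here; no mappings are
 * established.
 */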
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = pgd_index(vaddr);
	j = pud_index(vaddr);
	k = pmd_index(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			k = 0;
		}
		j = 0;
	}
}
#endif	/* CONFIG_MMU */

void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	NODE_DATA(nid) = memblock_alloc_try_nid(
				sizeof(struct pglist_data),
				SMP_CACHE_BYTES, MEMBLOCK_LOW_LIMIT,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!NODE_DATA(nid))
		panic("Can't allocate pgdat for node %d\n", nid);
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}

static void __init do_init_bootmem(void)
{
	unsigned long start_pfn, end_pfn;
	int i;

	/* Add active regions with valid PFNs. */
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)
		__add_active_range(0, start_pfn, end_pfn);

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	plat_mem_setup();

	sparse_init();
}

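/*
 * Reserve everything the kernel is already sitting on before the page
 * allocator comes up: the kernel image from the zero-page offset up to
 * _end, any pages below CONFIG_ZERO_PAGE_OFFSET, plus the initrd and the
 * crashkernel region if configured.
 */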
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}

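/*
 * Top-level early memory setup: register RAM with memblock via the machine
 * vector, apply any memory limit, bring up node 0 and the sparse memmap,
 * reset swapper_pg_dir and point the MMU's TTB at it, pre-build the page
 * tables backing the fixmap range, and finally hand the zone sizes to
 * free_area_init().
 */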
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

unsigned int mem_init_done = 0;

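/*
 * Final hand-over to the core mm: compute high_memory from the online
 * nodes, release all free memblock memory to the page allocator, bring up
 * the CPU caches, clear and write back the shared zero page, set up the
 * vsyscall page and print the resulting virtual memory layout.
 */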
void __init mem_init(void)
{
	pg_data_t *pgdat;

	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	/* We only have ZONE_NORMAL, so this is easy.. */
	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (unlikely(ret))
		printk("%s: Failed, __add_pages() == %d\n", __func__, ret);

	return ret;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */