Orange Pi5 kernel

Deprecated Linux kernel 5.10.110 for OrangePi 5/5B/5+ boards

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kmemleak.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

#define INIT_MEMBLOCK_REGIONS			128
#define INIT_PHYSMEM_REGIONS			4

#ifndef INIT_MEMBLOCK_RESERVED_REGIONS
# define INIT_MEMBLOCK_RESERVED_REGIONS		INIT_MEMBLOCK_REGIONS
#endif

/**
 * DOC: memblock overview
 *
 * Memblock is a method of managing memory regions during the early
 * boot period when the usual kernel memory allocators are not up and
 * running.
 *
 * Memblock views the system memory as collections of contiguous
 * regions. There are several types of these collections:
 *
 * * ``memory`` - describes the physical memory available to the
 *   kernel; this may differ from the actual physical memory installed
 *   in the system, for instance when the memory is restricted with
 *   the ``mem=`` command line parameter
 * * ``reserved`` - describes the regions that were allocated
 * * ``physmem`` - describes the actual physical memory available during
 *   boot regardless of the possible restrictions and memory hot(un)plug;
 *   the ``physmem`` type is only available on some architectures.
 *
 * Each region is represented by struct memblock_region, which
 * defines the region extents, its attributes and, on NUMA systems,
 * its NUMA node id. Every memory type is described by struct
 * memblock_type, which contains an array of memory regions along with
 * the allocator metadata. The "memory" and "reserved" types are nicely
 * wrapped with struct memblock. This structure is statically
 * initialized at build time. The region arrays are initially sized to
 * %INIT_MEMBLOCK_REGIONS for "memory" and %INIT_MEMBLOCK_RESERVED_REGIONS
 * for "reserved". The region array for "physmem" is initially sized to
 * %INIT_PHYSMEM_REGIONS.
 * memblock_allow_resize() enables automatic resizing of the region
 * arrays during addition of new regions. This feature should be used
 * with care so that memory allocated for the region array will not
 * overlap with areas that should be reserved, for example initrd.
 *
 * The early architecture setup should tell memblock what the physical
 * memory layout is by using the memblock_add() or memblock_add_node()
 * functions. The first function does not assign the region to a NUMA
 * node and it is appropriate for UMA systems. Yet, it is possible to
 * use it on NUMA systems as well and assign the region to a NUMA node
 * later in the setup process using memblock_set_node(). The
 * memblock_add_node() function performs such an assignment directly.
 *
 * Once memblock is set up, the memory can be allocated using one of
 * the API variants:
 *
 * * memblock_phys_alloc*() - these functions return the **physical**
 *   address of the allocated memory
 * * memblock_alloc*() - these functions return the **virtual** address
 *   of the allocated memory.
 *
 * Note that both API variants use implicit assumptions about allowed
 * memory ranges and the fallback methods. Consult the documentation
 * of memblock_alloc_internal() and memblock_alloc_range_nid() for a
 * more elaborate description.
 *
 * As the system boot progresses, the architecture-specific mem_init()
 * function frees all the memory to the buddy page allocator.
 *
 * Unless an architecture enables %CONFIG_ARCH_KEEP_MEMBLOCK, the
 * memblock data structures (except "physmem") will be discarded after the
 * system initialization completes.
 */
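
/*
 * A minimal usage sketch (illustrative only, not part of this file):
 * early platform code registers RAM, reserves what must survive, and
 * then allocates from what is left.  MY_DRAM_BASE and MY_DRAM_SIZE are
 * hypothetical placeholders for real platform values.
 *
 *	memblock_add(MY_DRAM_BASE, MY_DRAM_SIZE);
 *	memblock_reserve(__pa_symbol(_stext), _end - _stext);
 *	void *ptr = memblock_alloc(SZ_4K, SZ_4K);	    // zeroed, virtual
 *	phys_addr_t pa = memblock_phys_alloc(SZ_4K, SZ_4K); // physical
 */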

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data;
EXPORT_SYMBOL(contig_page_data);
#endif

#if defined(CONFIG_ROCKCHIP_THUNDER_BOOT) && defined(CONFIG_SMP)
static unsigned long defer_start __initdata;
static unsigned long defer_end __initdata;

#define DEFAULT_DEFER_FREE_BLOCK_SIZE SZ_256M
static unsigned long defer_free_block_size __initdata =
	DEFAULT_DEFER_FREE_BLOCK_SIZE;

static int __init early_defer_free_block_size(char *p)
{
	defer_free_block_size = memparse(p, &p);

	pr_debug("defer_free_block_size = 0x%lx\n", defer_free_block_size);

	return 0;
}

early_param("defer_free_block_size", early_defer_free_block_size);
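
/*
 * Illustrative usage: booting with "defer_free_block_size=512M" on the
 * kernel command line would override the 256 MiB default above;
 * memparse() accepts the usual K/M/G suffixes.
 */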
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_RESERVED_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS];
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,
	.memory.name		= "memory",

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_RESERVED_REGIONS,
	.reserved.name		= "reserved",

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
struct memblock_type physmem = {
	.regions		= memblock_physmem_init_regions,
	.cnt			= 1,	/* empty dummy entry */
	.max			= INIT_PHYSMEM_REGIONS,
	.name			= "physmem",
};
#endif

/*
 * Keep a pointer to &memblock.memory in the text section to use it in
 * __next_mem_range() and its helpers.
 * For architectures that do not keep memblock data after init, this
 * pointer will be reset to NULL at memblock_discard().
 */
static __refdata struct memblock_type *memblock_memory = &memblock.memory;

#define for_each_memblock_type(i, memblock_type, rgn)			\
	for (i = 0, rgn = &memblock_type->regions[0];			\
	     i < memblock_type->cnt;					\
	     i++, rgn = &memblock_type->regions[i])

#define memblock_dbg(fmt, ...)						\
	do {								\
		if (memblock_debug)					\
			pr_info(fmt, ##__VA_ARGS__);			\
	} while (0)

static int memblock_debug __initdata_memblock;
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

static enum memblock_flags __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, PHYS_ADDR_MAX - base);
}
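
/*
 * Illustrative example: for base = PHYS_ADDR_MAX - SZ_4K, a requested
 * size of SZ_1M is clamped down to SZ_4K, so base + size cannot wrap
 * around the end of the physical address space.
 */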

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
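
/*
 * The predicate above treats regions as half-open intervals:
 * [0x1000, 0x2000) and [0x2000, 0x3000) merely touch and do not overlap,
 * while [0x1000, 0x2000) and [0x1fff, 0x3000) do.
 */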

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	memblock_cap_size(base, &size);

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       enum memblock_flags flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid,
					enum memblock_flags flags)
{
	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE ||
	    end == MEMBLOCK_ALLOC_KASAN)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);

	if (memblock_bottom_up())
		return __memblock_find_range_bottom_up(start, end, size, align,
						       nid, flags);
	else
		return __memblock_find_range_top_down(start, end, size, align,
						      nid, flags);
}

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
 *       %MEMBLOCK_ALLOC_ACCESSIBLE
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * Return:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	enum memblock_flags flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifndef CONFIG_ARCH_KEEP_MEMBLOCK
/**
 * memblock_discard - discard memory and reserved arrays if they were allocated
 */
void __init memblock_discard(void)
{
	phys_addr_t addr, size;

	if (memblock.reserved.regions != memblock_reserved_init_regions) {
		addr = __pa(memblock.reserved.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.reserved.max);
		if (memblock_reserved_in_slab)
			kfree(memblock.reserved.regions);
		else
			__memblock_free_late(addr, size);
	}

	if (memblock.memory.regions != memblock_memory_init_regions) {
		addr = __pa(memblock.memory.regions);
		size = PAGE_ALIGN(sizeof(struct memblock_region) *
				  memblock.memory.max);
		if (memblock_memory_in_slab)
			kfree(memblock.memory.regions);
		else
			__memblock_free_late(addr, size);
	}

	memblock_memory = NULL;
}
#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * Return:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr, new_end;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new one aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       type->name, type->max, type->max * 2);
		return -1;
	}

	new_end = addr + new_size - 1;
	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]\n",
			type->name, type->max * 2, &addr, &new_end);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if it came from memblock.  Otherwise, we
	 * needn't do it.
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
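
/*
 * Illustrative: with the defaults above, reserving more than 128 distinct
 * ranges grows the "reserved" array 128 -> 256 -> 512 -> ... entries; each
 * step copies the old array and frees it unless it is the static initial
 * one.
 */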

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
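
/*
 * For example, adjacent regions [0x1000, 0x2000) and [0x2000, 0x3000) with
 * the same node id and flags collapse into a single [0x1000, 0x3000);
 * if either the node or the flags differ, both regions are kept.
 */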

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base, @base + @size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   enum memblock_flags flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base, @base + @size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, enum memblock_flags flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_NEED_MULTIPLE_NODES
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
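
/*
 * Worked example (illustrative): if "memory" already holds [0x1000, 0x3000)
 * and memblock_add_range() is called for [0x0, 0x4000) with matching nid and
 * flags, the first pass counts the two uncovered pieces ([0x0, 0x1000) and
 * [0x3000, 0x4000)), the array is resized if needed, the second pass inserts
 * them, and memblock_merge_regions() collapses the three adjacent regions
 * into a single [0x0, 0x4000).
 */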

/**
 * memblock_add_node - add new memblock region within a NUMA node
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See the memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

/**
 * memblock_add - add new memblock region
 * @base: base address of the new region
 * @size: size of the new region
 *
 * Add new memblock region [@base, @base + @size) to the "memory"
 * type. See the memblock_add_range() description for more details.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base, @base + @size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * Return:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(idx, type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  780) 			memblock_insert_region(type, idx--, rbase, end - rbase,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  781) 					       memblock_get_region_node(rgn),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  782) 					       rgn->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  783) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  784) 			/* @rgn is fully contained, record it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  785) 			if (!*end_rgn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  786) 				*start_rgn = idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  787) 			*end_rgn = idx + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  788) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  789) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  790) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  791) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  792) }
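
/*
 * Example: a worked sketch of the splitting above, with hypothetical
 * addresses. Isolating [0x3000, 0x7000) from a type holding one region
 * [0x1000-0x9000) splits it twice:
 *
 *	before:	0:[0x1000-0x9000)
 *	after:	0:[0x1000-0x3000) 1:[0x3000-0x7000) 2:[0x7000-0x9000)
 *
 * and returns *start_rgn == 1, *end_rgn == 2: only region 1 lies fully
 * inside the requested range.
 */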
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  793) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  794) static int __init_memblock memblock_remove_range(struct memblock_type *type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  795) 					  phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  796) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  797) 	int start_rgn, end_rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  798) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  799) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  800) 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  801) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  802) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  803) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  804) 	for (i = end_rgn - 1; i >= start_rgn; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  805) 		memblock_remove_region(type, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  806) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  807) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  808) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  809) int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  810) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  811) 	phys_addr_t end = base + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  812) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  813) 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  814) 		     &base, &end, (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  815) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  816) 	return memblock_remove_range(&memblock.memory, base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  817) }
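
/*
 * Example: a minimal sketch of trimming the usable RAM, in the spirit of a
 * mem= style limit. The 1 GiB cap is hypothetical:
 *
 *	phys_addr_t limit = SZ_1G;
 *
 *	memblock_remove(limit, PHYS_ADDR_MAX - limit);
 */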
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  818) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  819) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  820)  * memblock_free - free boot memory block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  821)  * @base: phys starting address of the boot memory block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  822)  * @size: size of the boot memory block in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  823)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  824)  * Free boot memory block previously allocated by memblock_alloc_xx() API.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  825)  * The freed memory will not be released to the buddy allocator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  826)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  827) int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  828) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  829) 	phys_addr_t end = base + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  830) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  831) 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  832) 		     &base, &end, (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  833) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  834) 	kmemleak_free_part_phys(base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  835) 	return memblock_remove_range(&memblock.reserved, base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  836) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  837) #ifdef CONFIG_ARCH_KEEP_MEMBLOCK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  838) EXPORT_SYMBOL_GPL(memblock_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  839) #endif
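
/*
 * Example: a minimal sketch of returning an early allocation to the pool
 * once it is no longer needed. memblock_phys_alloc() is the physical
 * address wrapper from <linux/memblock.h>; the size is hypothetical:
 *
 *	phys_addr_t pa = memblock_phys_alloc(SZ_64K, SMP_CACHE_BYTES);
 *
 *	if (pa)
 *		memblock_free(pa, SZ_64K);
 */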
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  840) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  841) int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  842) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  843) 	phys_addr_t end = base + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  844) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  845) 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  846) 		     &base, &end, (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  847) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  848) 	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  849) }
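
/*
 * Example: a minimal sketch of protecting a firmware handoff area so the
 * early allocator never hands it out. dtb_phys and dtb_size are
 * hypothetical values describing the bootloader's DTB copy:
 *
 *	memblock_reserve(dtb_phys, dtb_size);
 */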
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  850) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  851) #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  852) int __init_memblock memblock_physmem_add(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  853) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  854) 	phys_addr_t end = base + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  855) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  856) 	memblock_dbg("%s: [%pa-%pa] %pS\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  857) 		     &base, &end, (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  858) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  859) 	return memblock_add_range(&physmem, base, size, MAX_NUMNODES, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  861) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  863) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  864)  * memblock_setclr_flag - set or clear flag for a memory region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  865)  * @base: base address of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  866)  * @size: size of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  867)  * @set: set or clear the flag
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  868)  * @flag: the flag to update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  869)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  870)  * This function isolates the region [@base, @base + @size) and sets/clears @flag.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  871)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  872)  * Return: 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  873)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  874) static int __init_memblock memblock_setclr_flag(phys_addr_t base,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  875) 				phys_addr_t size, int set, int flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  876) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  877) 	struct memblock_type *type = &memblock.memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  878) 	int i, ret, start_rgn, end_rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  880) 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  881) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  882) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  883) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  884) 	for (i = start_rgn; i < end_rgn; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  885) 		struct memblock_region *r = &type->regions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  886) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  887) 		if (set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  888) 			r->flags |= flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  889) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  890) 			r->flags &= ~flag;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  892) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  893) 	memblock_merge_regions(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  894) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  896) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  897) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  898)  * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  899)  * @base: the base phys addr of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  900)  * @size: the size of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  901)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  902)  * Return: 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  903)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  904) int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  906) 	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  908) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  909) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  910)  * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  911)  * @base: the base phys addr of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  912)  * @size: the size of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  913)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  914)  * Return: 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  915)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  916) int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  917) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  918) 	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  919) }
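
/*
 * Example: a minimal sketch of how firmware-table parsing (e.g. ACPI SRAT)
 * might flag ranges that can later be hot-removed, so that movable_node
 * keeps kernel allocations out of them. "hotpluggable" is a hypothetical
 * flag decoded from the firmware entry:
 *
 *	if (hotpluggable)
 *		memblock_mark_hotplug(start, size);
 */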
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  920) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  921) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  922)  * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  923)  * @base: the base phys addr of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  924)  * @size: the size of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  925)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  926)  * Return: 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  927)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  928) int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  929) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  930) 	system_has_some_mirror = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  931) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  932) 	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  933) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  934) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  935) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  936)  * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  937)  * @base: the base phys addr of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  938)  * @size: the size of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  939)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  940)  * Return: 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  941)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  942) int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  944) 	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  945) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  946) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  947) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  948)  * memblock_clear_nomap - Clear flag MEMBLOCK_NOMAP for a specified region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  949)  * @base: the base phys addr of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  950)  * @size: the size of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  951)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  952)  * Return: 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  953)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  954) int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  955) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  956) 	return memblock_setclr_flag(base, size, 0, MEMBLOCK_NOMAP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  957) }
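
/*
 * Example: a minimal sketch of hiding a firmware-owned range from the
 * kernel's linear map while still accounting for it as memory. The region
 * is hypothetical (e.g. an EFI runtime services range):
 *
 *	memblock_mark_nomap(md_phys, md_size);	// md_phys/md_size: hypothetical
 */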
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  958) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  959) static bool should_skip_region(struct memblock_type *type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  960) 			       struct memblock_region *m,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  961) 			       int nid, int flags)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  962) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  963) 	int m_nid = memblock_get_region_node(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  965) 	/* we never skip regions when iterating memblock.reserved or physmem */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  966) 	if (type != memblock_memory)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  967) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  968) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  969) 	/* only memory regions are associated with nodes, check it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  970) 	if (nid != NUMA_NO_NODE && nid != m_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  971) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  972) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  973) 	/* skip hotpluggable memory regions if needed */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  974) 	if (movable_node_is_enabled() && memblock_is_hotpluggable(m) &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  975) 	    !(flags & MEMBLOCK_HOTPLUG))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  976) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  977) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  978) 	/* if we want mirror memory skip non-mirror memory regions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  979) 	if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  980) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  981) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  982) 	/* skip nomap memory unless we were asked for it explicitly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  983) 	if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  984) 		return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  986) 	return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  989) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  990)  * __next_mem_range - next function for for_each_free_mem_range() etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  991)  * @idx: pointer to u64 loop variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  992)  * @nid: node selector, %NUMA_NO_NODE for all nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  993)  * @flags: pick from blocks based on memory attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  994)  * @type_a: pointer to memblock_type from where the range is taken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  995)  * @type_b: pointer to memblock_type which excludes memory from being taken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  996)  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  997)  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  998)  * @out_nid: ptr to int for nid of the range, can be %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300  999)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000)  * Find the first area from *@idx which matches @nid, fill the out
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)  * parameters, and update *@idx for the next iteration.  The lower 32bit of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002)  * *@idx contains the index into type_a and the upper 32bit indexes the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003)  * areas before each region in type_b.	For example, if type_b regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004)  * look like the following,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006)  *	0:[0-16), 1:[32-48), 2:[128-130)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)  * The upper 32bit indexes the following regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010)  *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012)  * As both region arrays are sorted, the function advances the two indices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013)  * in lockstep and returns each intersection.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) 		      struct memblock_type *type_a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) 		      struct memblock_type *type_b, phys_addr_t *out_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) 		      phys_addr_t *out_end, int *out_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) 	int idx_a = *idx & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) 	int idx_b = *idx >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) 	if (WARN_ONCE(nid == MAX_NUMNODES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) 	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) 		nid = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) 	for (; idx_a < type_a->cnt; idx_a++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) 		struct memblock_region *m = &type_a->regions[idx_a];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) 		phys_addr_t m_start = m->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) 		phys_addr_t m_end = m->base + m->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) 		int	    m_nid = memblock_get_region_node(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) 		if (should_skip_region(type_a, m, nid, flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) 		if (!type_b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) 			if (out_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) 				*out_start = m_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) 			if (out_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) 				*out_end = m_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) 			if (out_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) 				*out_nid = m_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) 			idx_a++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) 			*idx = (u32)idx_a | (u64)idx_b << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) 		/* scan areas before each reservation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) 		for (; idx_b < type_b->cnt + 1; idx_b++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) 			struct memblock_region *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) 			phys_addr_t r_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) 			phys_addr_t r_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) 			r = &type_b->regions[idx_b];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) 			r_start = idx_b ? r[-1].base + r[-1].size : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) 			r_end = idx_b < type_b->cnt ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) 				r->base : PHYS_ADDR_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) 			 * if idx_b advanced past idx_a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) 			 * break out to advance idx_a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) 			if (r_start >= m_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) 			/* if the two regions intersect, we're done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) 			if (m_start < r_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) 				if (out_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) 					*out_start =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) 						max(m_start, r_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) 				if (out_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) 					*out_end = min(m_end, r_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) 				if (out_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) 					*out_nid = m_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) 				/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) 				 * The region which ends first is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) 				 * advanced for the next iteration.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) 				 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) 				if (m_end <= r_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) 					idx_a++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) 					idx_b++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) 				*idx = (u32)idx_a | (u64)idx_b << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) 	/* signal end of iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) 	*idx = ULLONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) }
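
/*
 * Example: a minimal sketch of driving the iterator above through its
 * canonical wrapper, walking every free (memory minus reserved) range:
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa)\n", &start, &end);
 */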
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094)  * __next_mem_range_rev - generic next function for for_each_*_range_rev()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)  * @idx: pointer to u64 loop variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097)  * @nid: node selector, %NUMA_NO_NODE for all nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)  * @flags: pick from blocks based on memory attributes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099)  * @type_a: pointer to memblock_type from where the range is taken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)  * @type_b: pointer to memblock_type which excludes memory from being taken
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101)  * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102)  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103)  * @out_nid: ptr to int for nid of the range, can be %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105)  * Finds the next range from type_a which is not marked as unsuitable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106)  * in type_b.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108)  * Reverse of __next_mem_range().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) 					  enum memblock_flags flags,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) 					  struct memblock_type *type_a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) 					  struct memblock_type *type_b,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) 					  phys_addr_t *out_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) 					  phys_addr_t *out_end, int *out_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) 	int idx_a = *idx & 0xffffffff;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) 	int idx_b = *idx >> 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) 	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) 		nid = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) 	if (*idx == (u64)ULLONG_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) 		idx_a = type_a->cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) 		if (type_b != NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) 			idx_b = type_b->cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) 			idx_b = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) 	for (; idx_a >= 0; idx_a--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) 		struct memblock_region *m = &type_a->regions[idx_a];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) 		phys_addr_t m_start = m->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) 		phys_addr_t m_end = m->base + m->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) 		int m_nid = memblock_get_region_node(m);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) 		if (should_skip_region(type_a, m, nid, flags))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) 		if (!type_b) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) 			if (out_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) 				*out_start = m_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) 			if (out_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) 				*out_end = m_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) 			if (out_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) 				*out_nid = m_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) 			idx_a--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) 			*idx = (u32)idx_a | (u64)idx_b << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) 		/* scan areas before each reservation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) 		for (; idx_b >= 0; idx_b--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155) 			struct memblock_region *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) 			phys_addr_t r_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) 			phys_addr_t r_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) 			r = &type_b->regions[idx_b];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) 			r_start = idx_b ? r[-1].base + r[-1].size : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) 			r_end = idx_b < type_b->cnt ?
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) 				r->base : PHYS_ADDR_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) 			/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) 			 * if idx_b advanced past idx_a,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) 			 * break out to advance idx_a
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) 			 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) 			if (r_end <= m_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) 			/* if the two regions intersect, we're done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) 			if (m_end > r_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) 				if (out_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) 					*out_start = max(m_start, r_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) 				if (out_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) 					*out_end = min(m_end, r_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) 				if (out_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) 					*out_nid = m_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) 				if (m_start >= r_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) 					idx_a--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) 				else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) 					idx_b--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) 				*idx = (u32)idx_a | (u64)idx_b << 32;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) 				return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) 	/* signal end of iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188) 	*idx = ULLONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192)  * Common iterator interface used to define for_each_mem_pfn_range().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) void __init_memblock __next_mem_pfn_range(int *idx, int nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) 				unsigned long *out_start_pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) 				unsigned long *out_end_pfn, int *out_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) 	struct memblock_type *type = &memblock.memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) 	struct memblock_region *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) 	int r_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) 	while (++*idx < type->cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) 		r = &type->regions[*idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) 		r_nid = memblock_get_region_node(r);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) 		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) 		if (nid == MAX_NUMNODES || nid == r_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) 	if (*idx >= type->cnt) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) 		*idx = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) 	if (out_start_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) 		*out_start_pfn = PFN_UP(r->base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) 	if (out_end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) 		*out_end_pfn = PFN_DOWN(r->base + r->size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) 	if (out_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) 		*out_nid = r_nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) }
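
/*
 * Example: a minimal sketch of the wrapper built on the iterator above, as
 * code sizing per-node memory maps would use it:
 *
 *	unsigned long start_pfn, end_pfn;
 *	int i, nid;
 *
 *	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
 *		pr_info("node %d: pfns [%lx-%lx)\n", nid, start_pfn, end_pfn);
 */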
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)  * memblock_set_node - set node ID on memblock regions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226)  * @base: base of area to set node ID for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227)  * @size: size of area to set node ID for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)  * @type: memblock type to set node ID for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229)  * @nid: node ID to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)  * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232)  * Regions which cross the area boundaries are split as necessary.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)  * 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) 				      struct memblock_type *type, int nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) 	int start_rgn, end_rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) 	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) 		return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) 	for (i = start_rgn; i < end_rgn; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) 		memblock_set_region_node(&type->regions[i], nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) 	memblock_merge_regions(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) }
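
/*
 * Example: a minimal sketch of NUMA init code binding a detected range to
 * its node before zone sizes are computed. start, size and nid are
 * hypothetical values from a firmware affinity table:
 *
 *	memblock_set_node(start, size, &memblock.memory, nid);
 */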
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258)  * __next_mem_pfn_range_in_zone - iterator for for_each_*_range_in_zone()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260)  * @idx: pointer to u64 loop variable
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261)  * @zone: zone in which all of the memory blocks reside
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262)  * @out_spfn: ptr to ulong for start pfn of the range, can be %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263)  * @out_epfn: ptr to ulong for end pfn of the range, can be %NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265)  * This function is meant to be a zone/pfn specific wrapper for the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)  * for_each_mem_range type iterators. It is used by the deferred memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267)  * init routines, which previously duplicated much of this logic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268)  * throughout the code. Centralizing the logic in one iterator that does
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)  * everything those callers need avoids the duplication and keeps the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270)  * call sites simple.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) void __init_memblock
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273) __next_mem_pfn_range_in_zone(u64 *idx, struct zone *zone,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) 			     unsigned long *out_spfn, unsigned long *out_epfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) 	int zone_nid = zone_to_nid(zone);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) 	phys_addr_t spa, epa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) 	int nid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) 	__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) 			 &memblock.memory, &memblock.reserved,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) 			 &spa, &epa, &nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) 	while (*idx != U64_MAX) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) 		unsigned long epfn = PFN_DOWN(epa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) 		unsigned long spfn = PFN_UP(spa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) 		 * Verify the end is at least past the start of the zone and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) 		 * that we have at least one PFN to initialize.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) 		if (zone->zone_start_pfn < epfn && spfn < epfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) 			/* if we went too far just stop searching */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) 			if (zone_end_pfn(zone) <= spfn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) 				*idx = U64_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) 				break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) 			}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) 			if (out_spfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) 				*out_spfn = max(zone->zone_start_pfn, spfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) 			if (out_epfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) 				*out_epfn = min(zone_end_pfn(zone), epfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) 			return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) 		__next_mem_range(idx, zone_nid, MEMBLOCK_NONE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) 				 &memblock.memory, &memblock.reserved,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) 				 &spa, &epa, &nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) 	/* signal end of iteration */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) 	if (out_spfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) 		*out_spfn = ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) 	if (out_epfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) 		*out_epfn = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
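
/*
 * Example: a minimal sketch of the zone iterator above through its wrapper,
 * in the style of the deferred memmap init code. "zone" is a hypothetical
 * struct zone pointer and init_range() a hypothetical helper:
 *
 *	unsigned long spfn, epfn;
 *	u64 i;
 *
 *	for_each_free_mem_pfn_range_in_zone(i, zone, &spfn, &epfn)
 *		nr_pages += init_range(spfn, epfn);
 */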
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322)  * memblock_alloc_range_nid - allocate boot memory block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323)  * @size: size of memory block to be allocated in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)  * @align: alignment of the region and block's size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)  * @start: the lower bound of the memory region to allocate (phys address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)  * @end: the upper bound of the memory region to allocate (phys address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327)  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328)  * @exact_nid: control the allocation fall back to other nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)  * The allocation is performed from memory region limited by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331)  * memblock.current_limit if @end == %MEMBLOCK_ALLOC_ACCESSIBLE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)  * If the specified node cannot hold the requested memory and @exact_nid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334)  * is false, the allocation falls back to any node in the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336)  * For systems with memory mirroring, the allocation is attempted first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337)  * from the regions with mirroring enabled and then retried from any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338)  * memory region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340)  * In addition, the function sets the min_count to 0 using kmemleak_alloc_phys()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341)  * for the allocated boot memory block, so that it is never reported as a leak.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344)  * Physical address of allocated memory block on success, %0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) 					phys_addr_t align, phys_addr_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) 					phys_addr_t end, int nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) 					bool exact_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) 	enum memblock_flags flags = choose_memblock_flags();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352) 	phys_addr_t found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 		nid = NUMA_NO_NODE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) 	if (!align) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) 		/* Can't use WARNs this early in boot on powerpc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) 		dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) 		align = SMP_CACHE_BYTES;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364) 	found = memblock_find_in_range_node(size, align, start, end, nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) 					    flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) 	if (found && !memblock_reserve(found, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) 		goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) 	if (nid != NUMA_NO_NODE && !exact_nid) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) 		found = memblock_find_in_range_node(size, align, start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) 						    end, NUMA_NO_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) 						    flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) 		if (found && !memblock_reserve(found, size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) 			goto done;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) 	if (flags & MEMBLOCK_MIRROR) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) 		flags &= ~MEMBLOCK_MIRROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) 		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) 			&size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) 		goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) done:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) 	/* Skip kmemleak for kasan_init() due to high volume. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) 	if (end != MEMBLOCK_ALLOC_KASAN)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) 		/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) 		 * The min_count is set to 0 so that memblock allocated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) 		 * blocks are never reported as leaks. This is because many
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) 		 * of these blocks are only referred via the physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) 		 * address which is not looked up by kmemleak.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) 		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) 		kmemleak_alloc_phys(found, size, 0, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) 	return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401)  * memblock_phys_alloc_range - allocate a memory block inside specified range
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402)  * @size: size of memory block to be allocated in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403)  * @align: alignment of the region and block's size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)  * @start: the lower bound of the memory region to allocate (physical address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405)  * @end: the upper bound of the memory region to allocate (physical address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407)  * Allocate @size bytes between @start and @end.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409)  * Return: physical address of the allocated memory block on success,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410)  * %0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) phys_addr_t __init memblock_phys_alloc_range(phys_addr_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) 					     phys_addr_t align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) 					     phys_addr_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) 					     phys_addr_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) 	memblock_dbg("%s: %llu bytes align=0x%llx from=%pa max_addr=%pa %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) 		     __func__, (u64)size, (u64)align, &start, &end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) 		     (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) 	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) 					false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) }
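
/*
 * Example: a minimal sketch of a bounded allocation, e.g. a buffer that a
 * 32-bit capable DMA engine must be able to reach. Size and alignment are
 * hypothetical:
 *
 *	phys_addr_t pa = memblock_phys_alloc_range(SZ_1M, SZ_1M, 0, SZ_4G);
 */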
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425)  * memblock_phys_alloc_try_nid - allocate a memory block from specified NUMA node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426)  * @size: size of memory block to be allocated in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427)  * @align: alignment of the region and block's size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428)  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430)  * Allocates a memory block from the specified NUMA node. If the node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431)  * has no available memory, the allocation falls back to any node in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)  * the system.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434)  * Return: physical address of the allocated memory block on success,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)  * %0 on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) phys_addr_t __init memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	return memblock_alloc_range_nid(size, align, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) 					MEMBLOCK_ALLOC_ACCESSIBLE, nid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444)  * memblock_alloc_internal - allocate boot memory block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)  * @size: size of memory block to be allocated in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446)  * @align: alignment of the region and block's size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447)  * @min_addr: the lower bound of the memory region to allocate (phys address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448)  * @max_addr: the upper bound of the memory region to allocate (phys address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450)  * @exact_nid: control the allocation fall back to other nodes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452)  * Allocates memory block using memblock_alloc_range_nid() and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)  * converts the returned physical address to virtual.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455)  * The @min_addr limit is dropped if it cannot be satisfied and the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)  * will fall back to memory below @min_addr. Other constraints, such
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457)  * as node and mirrored memory, will be handled again in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458)  * memblock_alloc_range_nid().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)  * Virtual address of allocated memory block on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) static void * __init memblock_alloc_internal(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) 				phys_addr_t size, phys_addr_t align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) 				phys_addr_t min_addr, phys_addr_t max_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) 				int nid, bool exact_nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) 	phys_addr_t alloc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) 	 * Detect any accidental use of these APIs after slab is ready, as at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) 	 * this moment memblock may be deinitialized already and its
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) 	 * internal data may be destroyed (after execution of memblock_free_all)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) 	if (WARN_ON_ONCE(slab_is_available()))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) 		return kzalloc_node(size, GFP_NOWAIT, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) 	if (max_addr > memblock.current_limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) 		max_addr = memblock.current_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) 	alloc = memblock_alloc_range_nid(size, align, min_addr, max_addr, nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) 					exact_nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) 	/* retry allocation without lower limit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) 	if (!alloc && min_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) 		alloc = memblock_alloc_range_nid(size, align, 0, max_addr, nid,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) 						exact_nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) 	if (!alloc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) 		return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) 	return phys_to_virt(alloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
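
/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * how the two-stage fallback above plays out for a hypothetical caller that
 * prefers memory above 4 GiB but can tolerate lower memory. The helper name
 * is made up; the #if 0 guard keeps it out of any build.
 */
#if 0
static void * __init example_prefer_high_memory(phys_addr_t size, int nid)
{
	/*
	 * The first memblock_alloc_range_nid() attempt honours
	 * min_addr = 4 GiB; if nothing above that line fits, the retry
	 * drops min_addr to 0 and may legitimately return lower memory.
	 */
	return memblock_alloc_internal(size, SMP_CACHE_BYTES,
				       (phys_addr_t)0x100000000ULL, /* 4 GiB */
				       MEMBLOCK_ALLOC_ACCESSIBLE,
				       nid, false);
}
#endif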
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)  * memblock_alloc_exact_nid_raw - allocate boot memory block on the exact node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497)  * without zeroing memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498)  * @size: size of memory block to be allocated in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499)  * @align: alignment of the region and block's size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)  * @min_addr: the lower bound of the memory region from where the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501)  *	  is preferred (phys address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502)  * @max_addr: the upper bound of the memory region from where the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503)  *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504)  *	      allocate only from memory limited by memblock.current_limit value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505)  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507)  * Public function, provides additional debug information (including caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)  * info), if enabled. Does not zero allocated memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)  * Virtual address of allocated memory block on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) void * __init memblock_alloc_exact_nid_raw(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) 			phys_addr_t size, phys_addr_t align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) 			phys_addr_t min_addr, phys_addr_t max_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) 			int nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518) 	void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) 		     __func__, (u64)size, (u64)align, nid, &min_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) 		     &max_addr, (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) 	ptr = memblock_alloc_internal(size, align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) 					   min_addr, max_addr, nid, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) 	if (ptr && size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) 		page_init_poison(ptr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) 	return ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) }
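
/*
 * Illustrative sketch (editorial addition): exact-node semantics. With
 * exact_nid == true the allocation never falls back to another node, so a
 * NULL return here means "no suitable memory on @nid", not "no memory at
 * all". The helper name is hypothetical; #if 0 keeps it out of any build.
 */
#if 0
static void * __init example_exact_node_buffer(int nid)
{
	void *buf = memblock_alloc_exact_nid_raw(PAGE_SIZE, PAGE_SIZE, 0,
						 MEMBLOCK_ALLOC_ACCESSIBLE,
						 nid);

	if (!buf)
		return NULL;

	/* the _raw variants do not zero: initialize before first use */
	memset(buf, 0, PAGE_SIZE);
	return buf;
}
#endif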
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)  * memblock_alloc_try_nid_raw - allocate boot memory block without zeroing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534)  * memory and without panicking
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535)  * @size: size of memory block to be allocated in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536)  * @align: alignment of the region and block's size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537)  * @min_addr: the lower bound of the memory region from where the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538)  *	  is preferred (phys address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)  * @max_addr: the upper bound of the memory region from where the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540)  *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541)  *	      allocate only from memory limited by memblock.current_limit value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544)  * Public function, provides additional debug information (including caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545)  * info), if enabled. Does not zero allocated memory, and does not panic if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)  * the request cannot be satisfied.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)  * Virtual address of allocated memory block on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) void * __init memblock_alloc_try_nid_raw(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) 			phys_addr_t size, phys_addr_t align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) 			phys_addr_t min_addr, phys_addr_t max_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) 			int nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) 	void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) 		     __func__, (u64)size, (u64)align, nid, &min_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) 		     &max_addr, (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) 	ptr = memblock_alloc_internal(size, align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) 					   min_addr, max_addr, nid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) 	if (ptr && size > 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) 		page_init_poison(ptr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) 	return ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571)  * memblock_alloc_try_nid - allocate boot memory block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572)  * @size: size of memory block to be allocated in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)  * @align: alignment of the region and block's size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574)  * @min_addr: the lower bound of the memory region from where the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575)  *	  is preferred (phys address)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576)  * @max_addr: the upper bound of the memory region from where the allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577)  *	      is preferred (phys address), or %MEMBLOCK_ALLOC_ACCESSIBLE to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578)  *	      allocate only from memory limited by memblock.current_limit value
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579)  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581)  * Public function, provides additional debug information (including caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582)  * info), if enabled. This function zeroes the allocated memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585)  * Virtual address of allocated memory block on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) void * __init memblock_alloc_try_nid(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) 			phys_addr_t size, phys_addr_t align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) 			phys_addr_t min_addr, phys_addr_t max_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) 			int nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) 	void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) 	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) 		     __func__, (u64)size, (u64)align, nid, &min_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) 		     &max_addr, (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) 	ptr = memblock_alloc_internal(size, align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) 					   min_addr, max_addr, nid, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) 	if (ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) 		memset(ptr, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602) 	return ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) }
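
/*
 * Illustrative sketch (editorial addition): the common boot-time pattern
 * for the zeroing wrapper above; memblock_alloc() in memblock.h expands to
 * an equivalent call. The table name and size are hypothetical; #if 0
 * keeps the block inert.
 */
#if 0
static void __init example_alloc_boot_table(void)
{
	void *table = memblock_alloc_try_nid(PAGE_SIZE, SMP_CACHE_BYTES, 0,
					     MEMBLOCK_ALLOC_ACCESSIBLE,
					     NUMA_NO_NODE);

	if (!table)
		panic("%s: failed to allocate boot table\n", __func__);

	/* memory arrives zeroed; no memset() is needed here */
}
#endif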
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606)  * __memblock_free_late - free pages directly to buddy allocator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607)  * @base: phys starting address of the boot memory block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608)  * @size: size of the boot memory block in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610)  * This is only useful when the memblock allocator has already been torn
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611)  * down, but we are still initializing the system.  Pages are released directly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)  * to the buddy allocator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) 	phys_addr_t cursor, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) 	end = base + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) 	memblock_dbg("%s: [%pa-%pa] %pS\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) 		     __func__, &base, &end, (void *)_RET_IP_);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) 	kmemleak_free_part_phys(base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) 	cursor = PFN_UP(base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) 	end = PFN_DOWN(base + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) 	for (; cursor < end; cursor++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) 		memblock_free_pages(pfn_to_page(cursor), cursor, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) 		totalram_pages_inc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629) }
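
/*
 * Illustrative sketch (editorial addition): once memblock_free_all() has
 * handed memory to the buddy allocator, dropping a leftover reservation
 * with memblock_free() only updates memblock bookkeeping; a caller that
 * wants the pages back this late uses __memblock_free_late() instead.
 * The region below is hypothetical; #if 0 keeps the block inert.
 */
#if 0
static void __init example_release_leftover_reservation(void)
{
	/* return a no-longer-needed firmware window to the buddy allocator */
	__memblock_free_late(0x8000000, 0x100000 /* 1 MiB */);
}
#endif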
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)  * Remaining API functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) phys_addr_t __init_memblock memblock_phys_mem_size(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) 	return memblock.memory.total_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) phys_addr_t __init_memblock memblock_reserved_size(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) 	return memblock.reserved.total_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) /* lowest address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) phys_addr_t __init_memblock memblock_start_of_DRAM(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) 	return memblock.memory.regions[0].base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) phys_addr_t __init_memblock memblock_end_of_DRAM(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) 	int idx = memblock.memory.cnt - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) 	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) EXPORT_SYMBOL_GPL(memblock_end_of_DRAM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) 	phys_addr_t max_addr = PHYS_ADDR_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) 	struct memblock_region *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) 	 * Translate the memory @limit size into the max address within one of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) 	 * the memory memblock regions. If @limit exceeds the total size of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) 	 * those regions, max_addr keeps its original value, PHYS_ADDR_MAX.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) 	for_each_mem_region(r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) 		if (limit <= r->size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) 			max_addr = r->base + limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) 			break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) 		limit -= r->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) 	return max_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) }
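
/*
 * Worked example (editorial addition): with memory regions
 * [0x80000000, 0xC0000000) and [0x100000000, 0x140000000) and
 * limit = 0x60000000 (1.5 GiB), the first region consumes 1 GiB of the
 * limit and the remaining 0x20000000 lands in the second region, so
 * max_addr = 0x100000000 + 0x20000000 = 0x120000000. Any limit above the
 * 2 GiB total leaves max_addr at PHYS_ADDR_MAX.
 */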
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) void __init memblock_enforce_memory_limit(phys_addr_t limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) 	phys_addr_t max_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) 	if (!limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) 	max_addr = __find_max_addr(limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) 	/* @limit exceeds the total size of the memory, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) 	if (max_addr == PHYS_ADDR_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) 	/* truncate both memory and reserved regions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) 	memblock_remove_range(&memblock.memory, max_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) 			      PHYS_ADDR_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) 	memblock_remove_range(&memblock.reserved, max_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) 			      PHYS_ADDR_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) }
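
/*
 * Illustrative sketch (editorial addition): how architecture setup code
 * typically wires a "mem=" style command-line limit to the helper above.
 * The handler name is hypothetical; #if 0 keeps it out of any build.
 */
#if 0
static int __init example_early_mem(char *p)
{
	phys_addr_t limit;

	if (!p)
		return -EINVAL;

	limit = memparse(p, &p) & PAGE_MASK;
	memblock_enforce_memory_limit(limit);
	return 0;
}
early_param("mem", example_early_mem);
#endif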
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) 	int start_rgn, end_rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) 	int i, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) 	if (!size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) 	ret = memblock_isolate_range(&memblock.memory, base, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) 						&start_rgn, &end_rgn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) 	if (ret)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) 	/* remove all the MAP regions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) 	for (i = memblock.memory.cnt - 1; i >= end_rgn; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) 		if (!memblock_is_nomap(&memblock.memory.regions[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) 			memblock_remove_region(&memblock.memory, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718) 	for (i = start_rgn - 1; i >= 0; i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) 		if (!memblock_is_nomap(&memblock.memory.regions[i]))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) 			memblock_remove_region(&memblock.memory, i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) 	/* truncate the reserved regions */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) 	memblock_remove_range(&memblock.reserved, 0, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) 	memblock_remove_range(&memblock.reserved,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) 			base + size, PHYS_ADDR_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) }
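
/*
 * Illustrative sketch (editorial addition): capping to a window is what
 * crash/capture kernels do so they only ever use their pre-reserved range,
 * while the loops above deliberately keep nomap regions visible. The
 * window below is hypothetical; #if 0 keeps the block inert.
 */
#if 0
static void __init example_cap_to_crash_window(void)
{
	/* keep RAM to [256 MiB, 256 MiB + 512 MiB) plus nomap regions */
	memblock_cap_memory_range(0x10000000, 0x20000000);
}
#endif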
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) void __init memblock_mem_limit_remove_map(phys_addr_t limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) 	phys_addr_t max_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) 	if (!limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) 	max_addr = __find_max_addr(limit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) 	/* @limit exceeds the total size of the memory, do nothing */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) 	if (max_addr == PHYS_ADDR_MAX)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) 	memblock_cap_memory_range(0, max_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) 	unsigned int left = 0, right = type->cnt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) 	do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) 		unsigned int mid = (right + left) / 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) 		if (addr < type->regions[mid].base)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) 			right = mid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753) 		else if (addr >= (type->regions[mid].base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) 				  type->regions[mid].size))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) 			left = mid + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) 		else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) 			return mid;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) 	} while (left < right);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) 	return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) }
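
/*
 * Worked example (editorial addition): with two regions [0x1000, 0x2000)
 * and [0x4000, 0x8000), the binary search for addr = 0x4fff starts with
 * (left, right) = (0, 2), probes mid = 1 and returns 1 immediately; for
 * addr = 0x3000 it narrows to (0, 1), then (1, 1), and returns -1 because
 * the address falls in the hole between the regions.
 */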
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) bool __init_memblock memblock_is_reserved(phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) 	return memblock_search(&memblock.reserved, addr) != -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) bool __init_memblock memblock_is_memory(phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) 	return memblock_search(&memblock.memory, addr) != -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) bool __init_memblock memblock_is_map_memory(phys_addr_t addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) 	int i = memblock_search(&memblock.memory, addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) 	if (i == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) 	return !memblock_is_nomap(&memblock.memory.regions[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) 			 unsigned long *start_pfn, unsigned long *end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) 	struct memblock_type *type = &memblock.memory;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) 	int mid = memblock_search(type, PFN_PHYS(pfn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) 	if (mid == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) 		return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) 	*start_pfn = PFN_DOWN(type->regions[mid].base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) 	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) 	return memblock_get_region_node(&type->regions[mid]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797)  * memblock_is_region_memory - check if a region is a subset of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798)  * @base: base of region to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799)  * @size: size of region to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)  * Check if the region [@base, @base + @size) is a subset of a memory block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804)  * true if the region is a subset of a memory block, false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) 	int idx = memblock_search(&memblock.memory, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) 	phys_addr_t end = base + memblock_cap_size(base, &size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) 	if (idx == -1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) 		return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) 	return (memblock.memory.regions[idx].base +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) 		 memblock.memory.regions[idx].size) >= end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)  * memblock_is_region_reserved - check if a region intersects reserved memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)  * @base: base of region to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)  * @size: size of region to check
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822)  * Check if the region [@base, @base + @size) intersects a reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823)  * memory block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)  * Return:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826)  * True if they intersect, false if not.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) 	return memblock_overlaps_region(&memblock.reserved, base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) }
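
/*
 * Illustrative sketch (editorial addition): the two predicates above are
 * commonly combined to decide whether a firmware-described range may be
 * treated as ordinary free RAM. Hypothetical helper; #if 0 keeps it inert.
 */
#if 0
static bool __init example_range_is_free_ram(phys_addr_t base,
					     phys_addr_t size)
{
	/* usable iff fully inside RAM and not overlapping a reservation */
	return memblock_is_region_memory(base, size) &&
	       !memblock_is_region_reserved(base, size);
}
#endif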
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) void __init_memblock memblock_trim_memory(phys_addr_t align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) 	phys_addr_t start, end, orig_start, orig_end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) 	struct memblock_region *r;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) 	for_each_mem_region(r) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) 		orig_start = r->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) 		orig_end = r->base + r->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) 		start = round_up(orig_start, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) 		end = round_down(orig_end, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) 		if (start == orig_start && end == orig_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) 			continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) 		if (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) 			r->base = start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) 			r->size = end - start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) 		} else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) 			memblock_remove_region(&memblock.memory,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) 					       r - memblock.memory.regions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) 			r--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) 		}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) }
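
/*
 * Worked example (editorial addition): trimming to align = 2 MiB turns
 * [0x1fff000, 0x4100000) into [0x2000000, 0x4000000). A region containing
 * no fully aligned block, e.g. [0x1f00000, 0x2100000), rounds to
 * start == end == 0x2000000 and is removed outright.
 */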
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) void __init_memblock memblock_set_current_limit(phys_addr_t limit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) 	memblock.current_limit = limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) phys_addr_t __init_memblock memblock_get_current_limit(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) 	return memblock.current_limit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) static void __init_memblock memblock_dump(struct memblock_type *type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) 	phys_addr_t base, end, size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) 	enum memblock_flags flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) 	int idx;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) 	struct memblock_region *rgn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) 	pr_info(" %s.cnt  = 0x%lx\n", type->name, type->cnt);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) 	for_each_memblock_type(idx, type, rgn) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) 		char nid_buf[32] = "";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) 		base = rgn->base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) 		size = rgn->size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) 		end = base + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) 		flags = rgn->flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) #ifdef CONFIG_NEED_MULTIPLE_NODES
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) 		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) 			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) 				 memblock_get_region_node(rgn));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) 		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) 			type->name, idx, &base, &end, &size, nid_buf, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) static void __init_memblock __memblock_dump_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) 	pr_info("MEMBLOCK configuration:\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) 	pr_info(" memory size = %pa reserved size = %pa\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) 		&memblock.memory.total_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) 		&memblock.reserved.total_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) 	memblock_dump(&memblock.memory);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) 	memblock_dump(&memblock.reserved);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) 	memblock_dump(&physmem);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) void __init_memblock memblock_dump_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) 	if (memblock_debug)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) 		__memblock_dump_all();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) void __init memblock_allow_resize(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) 	memblock_can_resize = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) static int __init early_memblock(char *p)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) 	if (p && strstr(p, "debug"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) 		memblock_debug = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) early_param("memblock", early_memblock);
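
/*
 * Usage note (editorial addition): booting with "memblock=debug" on the
 * kernel command line sets memblock_debug, which enables both the
 * memblock_dbg() traces in the allocators above and the full region dump
 * from memblock_dump_all().
 */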
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) static void __init __free_pages_memory(unsigned long start, unsigned long end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) 	int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) 	while (start < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) 		order = min(MAX_ORDER - 1UL, __ffs(start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) 		while (start + (1UL << order) > end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) 			order--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) 		memblock_free_pages(pfn_to_page(start), start, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) 		start += (1UL << order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) }
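
/*
 * Worked example (editorial addition): freeing pfns [3, 16) proceeds as
 *   pfn 3:  __ffs(3) = 0 -> order-0 block, pfn 3
 *   pfn 4:  __ffs(4) = 2 -> order-2 block, pfns 4-7
 *   pfn 8:  __ffs(8) = 3 -> order-3 block, pfns 8-15
 * The inner loop only lowers the order when a block would run past @end,
 * so every block handed to memblock_free_pages() stays naturally aligned.
 */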
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) #if defined(CONFIG_ROCKCHIP_THUNDER_BOOT) && defined(CONFIG_SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) int __init defer_free_memblock(void *unused)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) 	if (defer_start == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) 	pr_debug("start = %lu, end = %lu\n", defer_start, defer_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) 	__free_pages_memory(defer_start, defer_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) 	totalram_pages_add(defer_end - defer_start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) 	pr_info("%s: size %luM free %luM [%luM - %luM] total %luM\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) 		defer_free_block_size >> 20,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) 		(defer_end - defer_start) >> (20 - PAGE_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) 		defer_end >> (20 - PAGE_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) 		defer_start >> (20 - PAGE_SHIFT),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) 		totalram_pages() >> (20 - PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965) static unsigned long __init __free_memory_core(phys_addr_t start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) 				 phys_addr_t end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968) 	unsigned long start_pfn = PFN_UP(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) 	unsigned long end_pfn = min_t(unsigned long,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) 				      PFN_DOWN(end), max_low_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) 	if (start_pfn >= end_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) #if defined(CONFIG_ROCKCHIP_THUNDER_BOOT) && defined(CONFIG_SMP)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) 	if ((end - start) > defer_free_block_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) 		defer_start = start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) 		defer_end = end_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) 		return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) 	__free_pages_memory(start_pfn, end_pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) 	return end_pfn - start_pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) static unsigned long __init free_low_memory_core_early(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) 	unsigned long count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) 	phys_addr_t start, end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993) 	u64 i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) 	memblock_clear_hotplug(0, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) 	for_each_reserved_mem_range(i, &start, &end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) 		reserve_bootmem_region(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) 	/*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) 	 * We need to use NUMA_NO_NODE instead of NODE_DATA(0)->node_id
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) 	 * because in some cases, e.g. when Node0 has no RAM installed,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 	 * low memory will be on Node1.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) 	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) 	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) 				NULL)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) 		count += __free_memory_core(start, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) 	return count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) static int reset_managed_pages_done __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) void reset_node_managed_pages(pg_data_t *pgdat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) 	struct zone *z;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) 	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019) 		atomic_long_set(&z->managed_pages, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022) void __init reset_all_zones_managed_pages(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) 	struct pglist_data *pgdat;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) 	if (reset_managed_pages_done)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) 		return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) 	for_each_online_pgdat(pgdat)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) 		reset_node_managed_pages(pgdat);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) 	reset_managed_pages_done = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036)  * memblock_free_all - release free pages to the buddy allocator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)  *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038)  * Return: the number of pages actually released.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039)  */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) unsigned long __init memblock_free_all(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) 	unsigned long pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) 	reset_all_zones_managed_pages();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) 	pages = free_low_memory_core_early();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) 	totalram_pages_add(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) 	return pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) }
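
/*
 * Illustrative sketch (editorial addition): the typical, arch-specific
 * call site: a mem_init() style routine hands everything memblock still
 * considers free to the buddy allocator exactly once during boot.
 * Hypothetical function; #if 0 keeps it out of any build.
 */
#if 0
static void __init example_mem_init(void)
{
	unsigned long pages = memblock_free_all();

	pr_info("memblock released %lu pages to the buddy allocator\n",
		pages);
}
#endif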
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_ARCH_KEEP_MEMBLOCK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) static int memblock_debug_show(struct seq_file *m, void *private)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056) 	struct memblock_type *type = m->private;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) 	struct memblock_region *reg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) 	int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) 	phys_addr_t end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) 	for (i = 0; i < type->cnt; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) 		reg = &type->regions[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) 		end = reg->base + reg->size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) 		seq_printf(m, "%4d: ", i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066) 		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) 	}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070) DEFINE_SHOW_ATTRIBUTE(memblock_debug);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) static int __init memblock_init_debugfs(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) 	struct dentry *root = debugfs_create_dir("memblock", NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) 	debugfs_create_file("memory", 0444, root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) 			    &memblock.memory, &memblock_debug_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) 	debugfs_create_file("reserved", 0444, root,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) 			    &memblock.reserved, &memblock_debug_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) 	debugfs_create_file("physmem", 0444, root, &physmem,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) 			    &memblock_debug_fops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) 	return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) __initcall(memblock_init_debugfs);
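
/*
 * Usage note (editorial addition): with CONFIG_DEBUG_FS and
 * CONFIG_ARCH_KEEP_MEMBLOCK enabled, the tables registered above can be
 * inspected at runtime; per memblock_debug_show() each line is
 * "index: base..end", e.g. (illustrative values):
 *
 *   # cat /sys/kernel/debug/memblock/memory
 *      0: 0x0000000000200000..0x00000000feffffff
 */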
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) 
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) #endif /* CONFIG_DEBUG_FS && CONFIG_ARCH_KEEP_MEMBLOCK */