// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/percpu.c - percpu memory allocator
 *
 * Copyright (C) 2009		SUSE Linux Products GmbH
 * Copyright (C) 2009		Tejun Heo <tj@kernel.org>
 *
 * Copyright (C) 2017		Facebook Inc.
 * Copyright (C) 2017		Dennis Zhou <dennis@kernel.org>
 *
 * The percpu allocator handles both static and dynamic areas.  Percpu
 * areas are allocated in chunks which are divided into units.  There is
 * a 1-to-1 mapping for units to possible cpus.  These units are grouped
 * based on NUMA properties of the machine.
 *
 *  c0                           c1                         c2
 *  -------------------          -------------------        ------------
 * | u0 | u1 | u2 | u3 |        | u0 | u1 | u2 | u3 |      | u0 | u1 | u
 *  -------------------  ......  -------------------  ....  ------------
 *
 * Allocation is done by offsets into a unit's address space.  I.e., an
 * area of 512 bytes at 6k in c1 occupies 512 bytes at 6k in c1:u0,
 * c1:u1, c1:u2, etc.  On NUMA machines, the mapping may be non-linear
 * and even sparse.  Access is handled by configuring percpu base
 * registers according to the cpu to unit mappings and offsetting the
 * base address using pcpu_unit_size.
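 *
 * For illustration only: with the generic SMP mapping, dereferencing a
 * percpu pointer ptr for a given cpu resolves to
 *
 *	per_cpu_ptr(ptr, cpu) == SHIFT_PERCPU_PTR(ptr, per_cpu_offset(cpu))
 *
 * i.e. the pointer is shifted by that cpu's unit offset.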
 *
 * There is special consideration for the first chunk which must handle
 * the static percpu variables in the kernel image as allocation services
 * are not online yet.  In short, the first chunk is structured like so:
 *
 *                  <Static | [Reserved] | Dynamic>
 *
 * The static data is copied from the original section managed by the
 * linker.  The reserved section, if non-zero, primarily manages static
 * percpu variables from kernel modules.  Finally, the dynamic section
 * takes care of normal allocations.
 *
 * The allocator organizes chunks into lists according to free size and
 * memcg-awareness.  To make a percpu allocation memcg-aware, the
 * __GFP_ACCOUNT flag should be passed.  All memcg-aware allocations share
 * one set of chunks, while all unaccounted allocations and allocations
 * performed by processes belonging to the root memory cgroup use the
 * second set.
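 *
 * E.g. a caller opting into memcg accounting might do (illustrative
 * sketch; my_stats is a made-up type):
 *
 *	stats = alloc_percpu_gfp(struct my_stats, GFP_KERNEL | __GFP_ACCOUNT);
 *
 * and the allocation is then served from the memcg-aware chunk set.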
 *
 * The allocator tries to allocate from the fullest chunk first.  Each chunk
 * is managed by a bitmap with metadata blocks.  The allocation map is updated
 * on every allocation and free to reflect the current state while the boundary
 * map is only updated on allocation.  Each metadata block contains
 * information to help mitigate the need to iterate over large portions
 * of the bitmap.  The reverse mapping from page to chunk is stored in
 * the page's index.  Lastly, units are lazily backed and grow in unison.
 *
 * There is a unique conversion that goes on here between bytes and bits.
 * Each bit represents a fragment of size PCPU_MIN_ALLOC_SIZE.  The chunk
 * tracks the number of pages it is responsible for in nr_pages.  Helper
 * functions are used to convert between bytes, bits, and blocks.  All
 * hints are managed in bits unless explicitly stated.
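 *
 * As a worked example (assuming PCPU_MIN_ALLOC_SIZE == 4 bytes and 4K
 * pages): a 64 byte request occupies 64 / 4 == 16 bits in the allocation
 * map, and one page corresponds to PAGE_SIZE / 4 == 1024 bits, which is
 * PCPU_BITMAP_BLOCK_BITS when a metadata block covers a single page.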
 *
 * To use this allocator, arch code should do the following:
 *
 * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
 *   regular address to percpu pointer and back if they need to be
 *   different from the default
 *
 * - use pcpu_setup_first_chunk() during percpu area initialization to
 *   setup the first chunk containing the kernel static percpu area
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitmap.h>
#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/lcm.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>

#include <asm/cacheflush.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/percpu.h>

#include "percpu-internal.h"

/* the slots are sorted by free bytes left, 1-31 bytes share the same slot */
#define PCPU_SLOT_BASE_SHIFT		5
/* chunks in slots below this are subject to being sidelined on failed alloc */
#define PCPU_SLOT_FAIL_THRESHOLD	3

#define PCPU_EMPTY_POP_PAGES_LOW	2
#define PCPU_EMPTY_POP_PAGES_HIGH	4

#ifdef CONFIG_SMP
/* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
#ifndef __addr_to_pcpu_ptr
#define __addr_to_pcpu_ptr(addr)					\
	(void __percpu *)((unsigned long)(addr) -			\
			  (unsigned long)pcpu_base_addr +		\
			  (unsigned long)__per_cpu_start)
#endif
#ifndef __pcpu_ptr_to_addr
#define __pcpu_ptr_to_addr(ptr)						\
	(void __force *)((unsigned long)(ptr) +			\
			 (unsigned long)pcpu_base_addr -		\
			 (unsigned long)__per_cpu_start)
#endif
#else	/* CONFIG_SMP */
/* on UP, it's always identity mapped */
#define __addr_to_pcpu_ptr(addr)	(void __percpu *)(addr)
#define __pcpu_ptr_to_addr(ptr)		(void __force *)(ptr)
#endif	/* CONFIG_SMP */
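
/*
 * Illustration of the default mapping above: __addr_to_pcpu_ptr() rebases
 * a chunk address from pcpu_base_addr to __per_cpu_start and
 * __pcpu_ptr_to_addr() inverts that, so for any addr inside a chunk
 *
 *	__pcpu_ptr_to_addr(__addr_to_pcpu_ptr(addr)) == addr
 */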

static int pcpu_unit_pages __ro_after_init;
static int pcpu_unit_size __ro_after_init;
static int pcpu_nr_units __ro_after_init;
static int pcpu_atom_size __ro_after_init;
int pcpu_nr_slots __ro_after_init;
static size_t pcpu_chunk_struct_size __ro_after_init;

/* cpus with the lowest and highest unit addresses */
static unsigned int pcpu_low_unit_cpu __ro_after_init;
static unsigned int pcpu_high_unit_cpu __ro_after_init;

/* the address of the first chunk which starts with the kernel static area */
void *pcpu_base_addr __ro_after_init;
EXPORT_SYMBOL_GPL(pcpu_base_addr);

static const int *pcpu_unit_map __ro_after_init;		/* cpu -> unit */
const unsigned long *pcpu_unit_offsets __ro_after_init;	/* cpu -> unit offset */

/* group information, used for vm allocation */
static int pcpu_nr_groups __ro_after_init;
static const unsigned long *pcpu_group_offsets __ro_after_init;
static const size_t *pcpu_group_sizes __ro_after_init;

/*
 * The first chunk which always exists.  Note that unlike other
 * chunks, this one can be allocated and mapped in several different
 * ways and thus often doesn't live in the vmalloc area.
 */
struct pcpu_chunk *pcpu_first_chunk __ro_after_init;

/*
 * Optional reserved chunk.  This chunk reserves part of the first
 * chunk and serves it for reserved allocations.  When the reserved
 * region doesn't exist, the following variable is NULL.
 */
struct pcpu_chunk *pcpu_reserved_chunk __ro_after_init;

DEFINE_SPINLOCK(pcpu_lock);	/* all internal data structures */
static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop, map ext */

struct list_head *pcpu_chunk_lists __ro_after_init; /* chunk list slots */

/* chunks which need their map areas extended, protected by pcpu_lock */
static LIST_HEAD(pcpu_map_extend_chunks);

/*
 * The number of empty populated pages by chunk type, protected by pcpu_lock.
 * The reserved chunk doesn't contribute to the count.
 */
int pcpu_nr_empty_pop_pages[PCPU_NR_CHUNK_TYPES];

/*
 * The number of populated pages in use by the allocator, protected by
 * pcpu_lock.  This number is kept per unit per chunk (i.e. when a page gets
 * allocated/deallocated, it is allocated/deallocated in all units of a chunk
 * and increments/decrements this count by 1).
 */
static unsigned long pcpu_nr_populated;

/*
 * Balance work is used to populate or destroy chunks asynchronously.  We
 * try to keep the number of populated free pages between
 * PCPU_EMPTY_POP_PAGES_LOW and HIGH for atomic allocations and at most one
 * empty chunk.
 */
static void pcpu_balance_workfn(struct work_struct *work);
static DECLARE_WORK(pcpu_balance_work, pcpu_balance_workfn);
static bool pcpu_async_enabled __read_mostly;
static bool pcpu_atomic_alloc_failed;

static void pcpu_schedule_balance_work(void)
{
	if (pcpu_async_enabled)
		schedule_work(&pcpu_balance_work);
}

/**
 * pcpu_addr_in_chunk - check if the address is served from this chunk
 * @chunk: chunk of interest
 * @addr: percpu address
 *
 * RETURNS:
 * True if the address is served from this chunk.
 */
static bool pcpu_addr_in_chunk(struct pcpu_chunk *chunk, void *addr)
{
	void *start_addr, *end_addr;

	if (!chunk)
		return false;

	start_addr = chunk->base_addr + chunk->start_offset;
	end_addr = chunk->base_addr + chunk->nr_pages * PAGE_SIZE -
		   chunk->end_offset;

	return addr >= start_addr && addr < end_addr;
}

static int __pcpu_size_to_slot(int size)
{
	int highbit = fls(size);	/* size is in bytes */
	return max(highbit - PCPU_SLOT_BASE_SHIFT + 2, 1);
}

static int pcpu_size_to_slot(int size)
{
	if (size == pcpu_unit_size)
		return pcpu_nr_slots - 1;
	return __pcpu_size_to_slot(size);
}
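
/*
 * E.g. with PCPU_SLOT_BASE_SHIFT == 5: a chunk with 1024 free bytes maps
 * to slot fls(1024) - 5 + 2 == 8, while one with 64 free bytes maps to
 * slot fls(64) - 5 + 2 == 4 (values clamp to slot 1 at the low end).
 */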

static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
{
	const struct pcpu_block_md *chunk_md = &chunk->chunk_md;

	if (chunk->free_bytes < PCPU_MIN_ALLOC_SIZE ||
	    chunk_md->contig_hint == 0)
		return 0;

	return pcpu_size_to_slot(chunk_md->contig_hint * PCPU_MIN_ALLOC_SIZE);
}

/* set the pointer to a chunk in a page struct */
static void pcpu_set_page_chunk(struct page *page, struct pcpu_chunk *pcpu)
{
	page->index = (unsigned long)pcpu;
}

/* obtain pointer to a chunk from a page struct */
static struct pcpu_chunk *pcpu_get_page_chunk(struct page *page)
{
	return (struct pcpu_chunk *)page->index;
}

static int __maybe_unused pcpu_page_idx(unsigned int cpu, int page_idx)
{
	return pcpu_unit_map[cpu] * pcpu_unit_pages + page_idx;
}

static unsigned long pcpu_unit_page_offset(unsigned int cpu, int page_idx)
{
	return pcpu_unit_offsets[cpu] + (page_idx << PAGE_SHIFT);
}

static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
				     unsigned int cpu, int page_idx)
{
	return (unsigned long)chunk->base_addr +
	       pcpu_unit_page_offset(cpu, page_idx);
}

/*
 * The following are helper functions to help access bitmaps and convert
 * between bitmap offsets and address offsets.
 */
static unsigned long *pcpu_index_alloc_map(struct pcpu_chunk *chunk, int index)
{
	return chunk->alloc_map +
	       (index * PCPU_BITMAP_BLOCK_BITS / BITS_PER_LONG);
}

static unsigned long pcpu_off_to_block_index(int off)
{
	return off / PCPU_BITMAP_BLOCK_BITS;
}

static unsigned long pcpu_off_to_block_off(int off)
{
	return off & (PCPU_BITMAP_BLOCK_BITS - 1);
}

static unsigned long pcpu_block_off_to_off(int index, int off)
{
	return index * PCPU_BITMAP_BLOCK_BITS + off;
}
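
/*
 * E.g. with PCPU_BITMAP_BLOCK_BITS == 1024 (4K pages), chunk offset 2500
 * falls in block index 2500 / 1024 == 2 at block offset 2500 % 1024 == 452,
 * and pcpu_block_off_to_off(2, 452) recovers 2500.
 */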

/*
 * pcpu_next_hint - determine which hint to use
 * @block: block of interest
 * @alloc_bits: size of allocation
 *
 * This determines if we should scan based on the scan_hint or first_free.
 * In general, we want to scan from first_free to fulfill allocations by
 * first fit.  However, if we know a scan_hint at position scan_hint_start
 * cannot fulfill an allocation, we can begin scanning from there knowing
 * the contig_hint will be our fallback.
 */
static int pcpu_next_hint(struct pcpu_block_md *block, int alloc_bits)
{
	/*
	 * The three conditions below determine if we can skip past the
	 * scan_hint.  First, does the scan hint exist.  Second, is the
	 * contig_hint after the scan_hint (possibly not true iff
	 * contig_hint == scan_hint).  Third, is the allocation request
	 * larger than the scan_hint.
	 */
	if (block->scan_hint &&
	    block->contig_hint_start > block->scan_hint_start &&
	    alloc_bits > block->scan_hint)
		return block->scan_hint_start + block->scan_hint;

	return block->first_free;
}
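
/*
 * E.g. (illustrative values): with scan_hint == 4 at scan_hint_start == 100,
 * contig_hint_start == 200, and alloc_bits == 8, the scan_hint region is too
 * small for the request, so scanning starts at 100 + 4 == 104 rather than at
 * first_free, with the contig_hint as the fallback.
 */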

/**
 * pcpu_next_md_free_region - finds the next hint free area
 * @chunk: chunk of interest
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Helper function for pcpu_for_each_md_free_region.  It checks
 * block->contig_hint and performs aggregation across blocks to find the
 * next hint.  It modifies bit_off and bits in-place to be consumed in the
 * loop.
 */
static void pcpu_next_md_free_region(struct pcpu_chunk *chunk, int *bit_off,
				     int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
			return;
		}

		/*
		 * This checks three things.  First is there a contig_hint to
		 * check.  Second, have we checked this hint before by
		 * comparing the block_off.  Third, is this the same as the
		 * right contig hint.  In the last case, it spills over into
		 * the next block and should be handled by the contig area
		 * across blocks code.
		 */
		*bits = block->contig_hint;
		if (*bits && block->contig_hint_start >= block_off &&
		    *bits + block->contig_hint_start < PCPU_BITMAP_BLOCK_BITS) {
			*bit_off = pcpu_block_off_to_off(i,
					block->contig_hint_start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bits = block->right_free;
		*bit_off = (i + 1) * PCPU_BITMAP_BLOCK_BITS - block->right_free;
	}
}

/**
 * pcpu_next_fit_region - finds fit areas for a given allocation request
 * @chunk: chunk of interest
 * @alloc_bits: size of allocation
 * @align: alignment of area (max PAGE_SIZE)
 * @bit_off: chunk offset
 * @bits: size of free area
 *
 * Finds the next free region that is viable for use with a given size and
 * alignment.  This only returns if there is a valid area to be used for this
 * allocation.  block->first_free is returned if the allocation request fits
 * within the block to see if the request can be fulfilled prior to the contig
 * hint.
 */
static void pcpu_next_fit_region(struct pcpu_chunk *chunk, int alloc_bits,
				 int align, int *bit_off, int *bits)
{
	int i = pcpu_off_to_block_index(*bit_off);
	int block_off = pcpu_off_to_block_off(*bit_off);
	struct pcpu_block_md *block;

	*bits = 0;
	for (block = chunk->md_blocks + i; i < pcpu_chunk_nr_blocks(chunk);
	     block++, i++) {
		/* handles contig area across blocks */
		if (*bits) {
			*bits += block->left_free;
			if (*bits >= alloc_bits)
				return;
			if (block->left_free == PCPU_BITMAP_BLOCK_BITS)
				continue;
		}

		/* check block->contig_hint */
		*bits = ALIGN(block->contig_hint_start, align) -
			block->contig_hint_start;
		/*
		 * This uses the block offset to determine if this has been
		 * checked in the prior iteration.
		 */
		if (block->contig_hint &&
		    block->contig_hint_start >= block_off &&
		    block->contig_hint >= *bits + alloc_bits) {
			int start = pcpu_next_hint(block, alloc_bits);

			*bits += alloc_bits + block->contig_hint_start -
				 start;
			*bit_off = pcpu_block_off_to_off(i, start);
			return;
		}
		/* reset to satisfy the second predicate above */
		block_off = 0;

		*bit_off = ALIGN(PCPU_BITMAP_BLOCK_BITS - block->right_free,
				 align);
		*bits = PCPU_BITMAP_BLOCK_BITS - *bit_off;
		*bit_off = pcpu_block_off_to_off(i, *bit_off);
		if (*bits >= alloc_bits)
			return;
	}

	/* no valid offsets were found - fail condition */
	*bit_off = pcpu_chunk_map_bits(chunk);
}

/*
 * Metadata free area iterators.  These perform aggregation of free areas
 * based on the metadata blocks and return the offset @bit_off and size in
 * bits of the free area @bits.  pcpu_for_each_fit_region only returns when
 * a fit is found for the allocation request.
 */
#define pcpu_for_each_md_free_region(chunk, bit_off, bits)		\
	for (pcpu_next_md_free_region((chunk), &(bit_off), &(bits));	\
	     (bit_off) < pcpu_chunk_map_bits((chunk));			\
	     (bit_off) += (bits) + 1,					\
	     pcpu_next_md_free_region((chunk), &(bit_off), &(bits)))

#define pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits)     \
	for (pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits));				       \
	     (bit_off) < pcpu_chunk_map_bits((chunk));			       \
	     (bit_off) += (bits),					       \
	     pcpu_next_fit_region((chunk), (alloc_bits), (align), &(bit_off), \
				  &(bits)))
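
/*
 * A minimal usage sketch (not part of the allocator): summing all free
 * bits in a chunk with the md free region iterator.
 *
 *	int bit_off = 0, bits = 0, nr_free = 0;
 *
 *	pcpu_for_each_md_free_region(chunk, bit_off, bits)
 *		nr_free += bits;
 */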

/**
 * pcpu_mem_zalloc - allocate memory
 * @size: bytes to allocate
 * @gfp: allocation flags
 *
 * Allocate @size bytes.  If @size is smaller than PAGE_SIZE,
 * kzalloc() is used; otherwise, the equivalent of vzalloc() is used.
 * This is to facilitate passing through whitelisted flags.  The
 * returned memory is always zeroed.
 *
 * RETURNS:
 * Pointer to the allocated area on success, NULL on failure.
 */
static void *pcpu_mem_zalloc(size_t size, gfp_t gfp)
{
	if (WARN_ON_ONCE(!slab_is_available()))
		return NULL;

	if (size <= PAGE_SIZE)
		return kzalloc(size, gfp);
	else
		return __vmalloc(size, gfp | __GFP_ZERO);
}

/**
 * pcpu_mem_free - free memory
 * @ptr: memory to free
 *
 * Free @ptr.  @ptr should have been allocated using pcpu_mem_zalloc().
 */
static void pcpu_mem_free(void *ptr)
{
	kvfree(ptr);
}

static void __pcpu_chunk_move(struct pcpu_chunk *chunk, int slot,
			      bool move_front)
{
	if (chunk != pcpu_reserved_chunk) {
		struct list_head *pcpu_slot;

		pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
		if (move_front)
			list_move(&chunk->list, &pcpu_slot[slot]);
		else
			list_move_tail(&chunk->list, &pcpu_slot[slot]);
	}
}

static void pcpu_chunk_move(struct pcpu_chunk *chunk, int slot)
{
	__pcpu_chunk_move(chunk, slot, true);
}

/**
 * pcpu_chunk_relocate - put chunk in the appropriate chunk slot
 * @chunk: chunk of interest
 * @oslot: the previous slot it was on
 *
 * This function is called after an allocation or free changed @chunk.
 * New slot according to the changed state is determined and @chunk is
 * moved to the slot.  Note that the reserved chunk is never put on
 * chunk slots.
 *
 * CONTEXT:
 * pcpu_lock.
 */
static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
{
	int nslot = pcpu_chunk_slot(chunk);

	if (oslot != nslot)
		__pcpu_chunk_move(chunk, nslot, oslot < nslot);
}
/*
 * pcpu_update_empty_pages - update empty page counters
 * @chunk: chunk of interest
 * @nr: nr of empty pages
 *
 * This is used to keep track of the empty pages now based on the premise
 * that an md_block covers a page.  The hint update functions recognize if
 * a block is made full or broken to calculate deltas for keeping track of
 * free pages.
 */
static inline void pcpu_update_empty_pages(struct pcpu_chunk *chunk, int nr)
{
	chunk->nr_empty_pop_pages += nr;
	if (chunk != pcpu_reserved_chunk)
		pcpu_nr_empty_pop_pages[pcpu_chunk_type(chunk)] += nr;
}

/*
 * pcpu_region_overlap - determines if two regions overlap
 * @a: start of first region, inclusive
 * @b: end of first region, exclusive
 * @x: start of second region, inclusive
 * @y: end of second region, exclusive
 *
 * This is used to determine if the hint region [a, b) overlaps with the
 * allocated region [x, y).
 */
static inline bool pcpu_region_overlap(int a, int b, int x, int y)
{
	return (a < y) && (x < b);
}
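
/*
 * Both intervals are half-open, so e.g. pcpu_region_overlap(0, 4, 4, 8)
 * is false while pcpu_region_overlap(0, 4, 3, 8) is true.
 */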

/**
 * pcpu_block_update - updates a block given a free area
 * @block: block of interest
 * @start: start offset in block
 * @end: end offset in block
 *
 * Updates a block given a known free area.  The region [start, end) is
 * expected to be the entirety of the free area within a block.  Chooses
 * the best starting offset if the contig hints are equal.
 */
static void pcpu_block_update(struct pcpu_block_md *block, int start, int end)
{
	int contig = end - start;

	block->first_free = min(block->first_free, start);
	if (start == 0)
		block->left_free = contig;

	if (end == block->nr_bits)
		block->right_free = contig;

	if (contig > block->contig_hint) {
		/* promote the old contig_hint to be the new scan_hint */
		if (start > block->contig_hint_start) {
			if (block->contig_hint > block->scan_hint) {
				block->scan_hint_start =
					block->contig_hint_start;
				block->scan_hint = block->contig_hint;
			} else if (start < block->scan_hint_start) {
				/*
				 * The old contig_hint == scan_hint.  But, the
				 * new contig is larger so hold the invariant
				 * scan_hint_start < contig_hint_start.
				 */
				block->scan_hint = 0;
			}
		} else {
			block->scan_hint = 0;
		}
		block->contig_hint_start = start;
		block->contig_hint = contig;
	} else if (contig == block->contig_hint) {
		if (block->contig_hint_start &&
		    (!start ||
		     __ffs(start) > __ffs(block->contig_hint_start))) {
			/* start has a better alignment so use it */
			block->contig_hint_start = start;
			if (start < block->scan_hint_start &&
			    block->contig_hint > block->scan_hint)
				block->scan_hint = 0;
		} else if (start > block->scan_hint_start ||
			   block->contig_hint > block->scan_hint) {
			/*
			 * Knowing contig == contig_hint, update the scan_hint
			 * if it is farther than or larger than the current
			 * scan_hint.
			 */
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	} else {
		/*
		 * The region is smaller than the contig_hint.  So only update
		 * the scan_hint if it is larger than or equal and farther than
		 * the current scan_hint.
		 */
		if ((start < block->contig_hint_start &&
		     (contig > block->scan_hint ||
		      (contig == block->scan_hint &&
		       start > block->scan_hint_start)))) {
			block->scan_hint_start = start;
			block->scan_hint = contig;
		}
	}
}
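
/*
 * Example trace (illustrative values): a block with contig_hint == 100 at
 * contig_hint_start == 0 and scan_hint == 0 that is updated with the free
 * area [200, 360) promotes the old contig_hint into the scan_hint
 * (scan_hint_start == 0, scan_hint == 100) and records the new
 * contig_hint == 160 at contig_hint_start == 200.
 */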
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) * pcpu_block_update_scan - update a block given a free area from a scan
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * @chunk: chunk of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * @bit_off: chunk offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * @bits: size of free area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * Finding the final allocation spot first goes through pcpu_find_block_fit()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * to find a block that can hold the allocation and then pcpu_alloc_area()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * where a scan is used. When allocations require specific alignments,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * we can inadvertently create holes which will not be seen in the alloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) * or free paths.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) * This takes a given free area hole and updates a block as it may change the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) * scan_hint. We need to scan backwards to ensure we don't miss free bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) * from alignment.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) static void pcpu_block_update_scan(struct pcpu_chunk *chunk, int bit_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) int bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) int s_off = pcpu_off_to_block_off(bit_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) int e_off = s_off + bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) int s_index, l_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) struct pcpu_block_md *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) if (e_off > PCPU_BITMAP_BLOCK_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) s_index = pcpu_off_to_block_index(bit_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680) block = chunk->md_blocks + s_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) /* scan backwards in case of alignment skipping free bits */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index), s_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) s_off = (s_off == l_bit) ? 0 : l_bit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) pcpu_block_update(block, s_off, e_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) * pcpu_chunk_refresh_hint - updates metadata about a chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) * @chunk: chunk of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) * @full_scan: if we should scan from the beginning
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) * Iterates over the metadata blocks to find the largest contig area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) * A full scan can be avoided on the allocation path as this is triggered
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) * if we broke the contig_hint. In doing so, the scan_hint will be before
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) * the contig_hint or after if the scan_hint == contig_hint. This cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698) * be prevented on freeing as we want to find the largest area possibly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) * spanning blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) static void pcpu_chunk_refresh_hint(struct pcpu_chunk *chunk, bool full_scan)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) struct pcpu_block_md *chunk_md = &chunk->chunk_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704) int bit_off, bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) /* promote scan_hint to contig_hint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (!full_scan && chunk_md->scan_hint) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) bit_off = chunk_md->scan_hint_start + chunk_md->scan_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) chunk_md->contig_hint_start = chunk_md->scan_hint_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) chunk_md->contig_hint = chunk_md->scan_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) chunk_md->scan_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) bit_off = chunk_md->first_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) chunk_md->contig_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) pcpu_for_each_md_free_region(chunk, bit_off, bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) pcpu_block_update(chunk_md, bit_off, bit_off + bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) * pcpu_block_refresh_hint
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) * @chunk: chunk of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) * @index: index of the metadata block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) * Scans over the block beginning at first_free and updates the block
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) * metadata accordingly.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730) static void pcpu_block_refresh_hint(struct pcpu_chunk *chunk, int index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) struct pcpu_block_md *block = chunk->md_blocks + index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) unsigned long *alloc_map = pcpu_index_alloc_map(chunk, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734) unsigned int rs, re, start; /* region start, region end */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) /* promote scan_hint to contig_hint */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) if (block->scan_hint) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) start = block->scan_hint_start + block->scan_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) block->contig_hint_start = block->scan_hint_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) block->contig_hint = block->scan_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) block->scan_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) start = block->first_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) block->contig_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) block->right_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 748)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 749) /* iterate over free areas and update the contig hints */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 750) bitmap_for_each_clear_region(alloc_map, rs, re, start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 751) PCPU_BITMAP_BLOCK_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 752) pcpu_block_update(block, rs, re);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 753) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 754)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 755) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 756) * pcpu_block_update_hint_alloc - update hint on allocation path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 757) * @chunk: chunk of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 758) * @bit_off: chunk offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 759) * @bits: size of request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 760) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 761) * Updates metadata for the allocation path. The metadata only has to be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 762) * refreshed by a full scan iff the chunk's contig hint is broken. Block level
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 763) * scans are required if the block's contig hint is broken.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 764) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 765) static void pcpu_block_update_hint_alloc(struct pcpu_chunk *chunk, int bit_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 766) int bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 767) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 768) struct pcpu_block_md *chunk_md = &chunk->chunk_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 769) int nr_empty_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 770) struct pcpu_block_md *s_block, *e_block, *block;
	int s_index, e_index;	/* block indexes of the allocated area */
	int s_off, e_off;	/* block offsets of the allocated area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 774) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 775) * Calculate per block offsets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 776) * The calculation uses an inclusive range, but the resulting offsets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 777) * are [start, end). e_index always points to the last block in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 778) * range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 780) s_index = pcpu_off_to_block_index(bit_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 781) e_index = pcpu_off_to_block_index(bit_off + bits - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 782) s_off = pcpu_off_to_block_off(bit_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 783) e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
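
	/*
	 * Worked example (illustrative values, PCPU_BITMAP_BLOCK_BITS ==
	 * 1024): an area with bit_off = 1000 and bits = 50 ends at bit 1049,
	 * giving s_index = 0, e_index = 1, s_off = 1000 and
	 * e_off = (1049 % 1024) + 1 = 26. Using the inclusive end bit
	 * (bit_off + bits - 1) keeps e_index on the last block actually
	 * touched even when the area ends exactly on a block boundary.
	 */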
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 784)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 785) s_block = chunk->md_blocks + s_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 786) e_block = chunk->md_blocks + e_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 787)
	/*
	 * Update s_block.
	 * block->first_free must be advanced if the allocation starts there.
	 * If the allocation broke the block's contig_hint, a scan is
	 * required to restore it.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 794) if (s_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 795) nr_empty_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 796)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 797) if (s_off == s_block->first_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 798) s_block->first_free = find_next_zero_bit(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799) pcpu_index_alloc_map(chunk, s_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) PCPU_BITMAP_BLOCK_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) s_off + bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) if (pcpu_region_overlap(s_block->scan_hint_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) s_block->scan_hint_start + s_block->scan_hint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) s_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) s_off + bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) s_block->scan_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) if (pcpu_region_overlap(s_block->contig_hint_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) s_block->contig_hint_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) s_block->contig_hint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) s_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) s_off + bits)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) /* block contig hint is broken - scan to fix it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) if (!s_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816) s_block->left_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) pcpu_block_refresh_hint(chunk, s_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) /* update left and right contig manually */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820) s_block->left_free = min(s_block->left_free, s_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) if (s_index == e_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) s_block->right_free = min_t(int, s_block->right_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) PCPU_BITMAP_BLOCK_BITS - e_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) s_block->right_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) * Update e_block.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) if (s_index != e_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) if (e_block->contig_hint == PCPU_BITMAP_BLOCK_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) nr_empty_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834)
		/*
		 * When the allocation spans blocks, the end of the area
		 * falls in the left portion of e_block.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) e_block->first_free = find_next_zero_bit(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) pcpu_index_alloc_map(chunk, e_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) PCPU_BITMAP_BLOCK_BITS, e_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842)
		if (e_off == PCPU_BITMAP_BLOCK_BITS) {
			/*
			 * The allocation ends exactly at the block boundary,
			 * so e_block is fully allocated; bump the pointer so
			 * the reset loop below covers it as well.
			 */
			e_block++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) if (e_off > e_block->scan_hint_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848) e_block->scan_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850) e_block->left_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) if (e_off > e_block->contig_hint_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) /* contig hint is broken - scan to fix it */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) pcpu_block_refresh_hint(chunk, e_index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) e_block->right_free =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) min_t(int, e_block->right_free,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) PCPU_BITMAP_BLOCK_BITS - e_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) /* update in-between md_blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) nr_empty_pages += (e_index - s_index - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) for (block = s_block + 1; block < e_block; block++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) block->scan_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865) block->contig_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) block->left_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) block->right_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) if (nr_empty_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) pcpu_update_empty_pages(chunk, -nr_empty_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) if (pcpu_region_overlap(chunk_md->scan_hint_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) chunk_md->scan_hint_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) chunk_md->scan_hint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) bit_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878) bit_off + bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) chunk_md->scan_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) * The only time a full chunk scan is required is if the chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) * contig hint is broken. Otherwise, it means a smaller space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884) * was used and therefore the chunk contig hint is still correct.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) if (pcpu_region_overlap(chunk_md->contig_hint_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) chunk_md->contig_hint_start +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) chunk_md->contig_hint,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) bit_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) bit_off + bits))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) pcpu_chunk_refresh_hint(chunk, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) }
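
/*
 * Worked example (illustrative values, PCPU_BITMAP_BLOCK_BITS == 1024,
 * continuing the offsets above): an allocation at bit_off = 1000 for
 * bits = 50 spans two blocks: s_block's right_free drops to 0, e_block's
 * left_free drops to 0 and its first_free moves to the first zero bit at
 * or after e_off = 26, and any fully covered block in between has all of
 * its hints zeroed. The chunk-level hints are only invalidated when the
 * allocated area overlaps them.
 */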
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) * pcpu_block_update_hint_free - updates the block hints on the free path
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) * @chunk: chunk of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) * @bit_off: chunk offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) * @bits: size of request
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) *
 * Updates metadata for the free path. This avoids a blind block refresh by
 * making use of the block contig hints. If this fails, it scans forward and
 * backward to determine the extent of the free area. The scan is capped at
 * the boundary of blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) *
 * A chunk update is triggered if a page becomes free, a block becomes free,
 * or the free spans across blocks. This tradeoff minimizes iterating
 * over the block metadata to update chunk_md->contig_hint.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * chunk_md->contig_hint may be off by up to a page, but it will never be more
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * than the available space. If the contig hint is contained in one block, it
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * will be accurate.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) static void pcpu_block_update_hint_free(struct pcpu_chunk *chunk, int bit_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) int bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) int nr_empty_pages = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916) struct pcpu_block_md *s_block, *e_block, *block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) int s_index, e_index; /* block indexes of the freed allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918) int s_off, e_off; /* block offsets of the freed allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) int start, end; /* start and end of the whole free area */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) * Calculate per block offsets.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) * The calculation uses an inclusive range, but the resulting offsets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) * are [start, end). e_index always points to the last block in the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) * range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927) s_index = pcpu_off_to_block_index(bit_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) e_index = pcpu_off_to_block_index(bit_off + bits - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) s_off = pcpu_off_to_block_off(bit_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) e_off = pcpu_off_to_block_off(bit_off + bits - 1) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932) s_block = chunk->md_blocks + s_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) e_block = chunk->md_blocks + e_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934)
	/*
	 * Check if the freed area aligns with the block->contig_hint.
	 * If it does, the scan to find the beginning/end of the larger
	 * free area can be avoided.
	 *
	 * start and end refer to the beginning and end of the free area
	 * within their respective blocks. This is not necessarily the
	 * entire free area, which may span past the beginning or end of
	 * these blocks.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) start = s_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) if (s_off == s_block->contig_hint + s_block->contig_hint_start) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947) start = s_block->contig_hint_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) } else {
		/*
		 * Scan backwards to find the extent of the free area.
		 * find_last_bit() returns the size passed in (start here)
		 * when no set bit is found, in which case everything before
		 * s_off in this block is free.
		 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) int l_bit = find_last_bit(pcpu_index_alloc_map(chunk, s_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) start = (start == l_bit) ? 0 : l_bit + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) end = e_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961) if (e_off == e_block->contig_hint_start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) end = e_block->contig_hint_start + e_block->contig_hint;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) end = find_next_bit(pcpu_index_alloc_map(chunk, e_index),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) PCPU_BITMAP_BLOCK_BITS, end);
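
	/*
	 * Worked example (illustrative values): freeing an area that starts
	 * at s_off = 200 in a block whose contig_hint covers [150, 200) hits
	 * the fast path above: s_off == contig_hint_start + contig_hint, so
	 * start becomes 150 with no scan. If instead bit 180 were still
	 * allocated, find_last_bit() over the first 200 bits would return
	 * 180 and start would become 181, the first free bit of the
	 * adjoining free run.
	 */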
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) /* update s_block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) e_off = (s_index == e_index) ? end : PCPU_BITMAP_BLOCK_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) if (!start && e_off == PCPU_BITMAP_BLOCK_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) nr_empty_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971) pcpu_block_update(s_block, start, e_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)
	/* the free spans across blocks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) if (s_index != e_index) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) /* update e_block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) if (end == PCPU_BITMAP_BLOCK_BITS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) nr_empty_pages++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) pcpu_block_update(e_block, 0, end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) /* reset md_blocks in the middle */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) nr_empty_pages += (e_index - s_index - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) for (block = s_block + 1; block < e_block; block++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) block->first_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984) block->scan_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) block->contig_hint_start = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) block->contig_hint = PCPU_BITMAP_BLOCK_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) block->left_free = PCPU_BITMAP_BLOCK_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) block->right_free = PCPU_BITMAP_BLOCK_BITS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) if (nr_empty_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) pcpu_update_empty_pages(chunk, nr_empty_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) * Refresh chunk metadata when the free makes a block free or spans
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) * across blocks. The contig_hint may be off by up to a page, but if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) * the contig_hint is contained in a block, it will be accurate with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) * the else condition below.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001) if (((end - start) >= PCPU_BITMAP_BLOCK_BITS) || s_index != e_index)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) pcpu_chunk_refresh_hint(chunk, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) pcpu_block_update(&chunk->chunk_md,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) pcpu_block_off_to_off(s_index, start),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) }
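
/*
 * Worked example (illustrative values): freeing 10 bits in the middle of an
 * otherwise allocated block yields s_index == e_index and end - start ==
 * 10 < PCPU_BITMAP_BLOCK_BITS, so the else branch above updates the chunk
 * hints directly from that single region without walking every md block. A
 * free that empties a block or crosses a block boundary pays for the full
 * pcpu_chunk_refresh_hint() walk instead.
 */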
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * pcpu_is_populated - determines if the region is populated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) * @chunk: chunk of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) * @bit_off: chunk offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) * @bits: size of area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) * @next_off: return value for the next offset to start searching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) * For atomic allocations, check if the backing pages are populated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017) *
 * RETURNS:
 * true if the backing pages are fully populated, false otherwise.
 * @next_off is set to skip over unpopulated blocks in pcpu_find_block_fit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) static bool pcpu_is_populated(struct pcpu_chunk *chunk, int bit_off, int bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023) int *next_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025) unsigned int page_start, page_end, rs, re;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) page_start = PFN_DOWN(bit_off * PCPU_MIN_ALLOC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028) page_end = PFN_UP((bit_off + bits) * PCPU_MIN_ALLOC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) rs = page_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) bitmap_next_clear_region(chunk->populated, &rs, &re, page_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) if (rs >= page_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) *next_off = re * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037) }
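
/*
 * Worked example (illustrative values, PAGE_SIZE == 4096 and
 * PCPU_MIN_ALLOC_SIZE == 4, i.e. 1024 bits per page): bit_off = 1000 and
 * bits = 100 cover bytes [4000, 4400), so page_start = 0 and page_end = 2.
 * If page 1 is unpopulated, rs lands on 1 (< page_end), false is returned,
 * and *next_off is set to bit 2048, the first bit of the page following
 * the clear region, letting the caller resume its search there.
 */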
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) * pcpu_find_block_fit - finds the block index to start searching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) * @chunk: chunk of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042) * @alloc_bits: size of request in allocation units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) * @align: alignment of area (max PAGE_SIZE bytes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044) * @pop_only: use populated regions only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) *
 * Given a chunk and an allocation spec, find the offset to begin searching
 * for a free region. This iterates over the bitmap metadata blocks to
 * find an offset that is guaranteed to fit the requirements. It is not
 * quite first-fit: if the allocation does not fit in the contig hint of a
 * block or chunk, that block or chunk is skipped. This errs on the side of
 * caution to prevent excess iteration. Poor alignment can cause the
 * allocator to skip over blocks and chunks that have valid free areas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) * The offset in the bitmap to begin searching.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056) * -1 if no offset is found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) static int pcpu_find_block_fit(struct pcpu_chunk *chunk, int alloc_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) size_t align, bool pop_only)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061) struct pcpu_block_md *chunk_md = &chunk->chunk_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) int bit_off, bits, next_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063)
	/*
	 * Check to see if the allocation can fit in the chunk's contig hint.
	 * This is an optimization to prevent scanning: if it cannot fit in
	 * the global hint, there is memory pressure and a new chunk would
	 * be created soon anyway.
	 */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) bit_off = ALIGN(chunk_md->contig_hint_start, align) -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) chunk_md->contig_hint_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) if (bit_off + alloc_bits > chunk_md->contig_hint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075) bit_off = pcpu_next_hint(chunk_md, alloc_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077) pcpu_for_each_fit_region(chunk, alloc_bits, align, bit_off, bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) if (!pop_only || pcpu_is_populated(chunk, bit_off, bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) &next_off))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) bit_off = next_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) if (bit_off == pcpu_chunk_map_bits(chunk))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return bit_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) }
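
/*
 * Worked example (illustrative values): with contig_hint_start = 3,
 * contig_hint = 8 and align = 4, the aligned start wastes
 * bit_off = ALIGN(3, 4) - 3 = 1 bit of the hint, so alloc_bits up to 7
 * still fits while alloc_bits = 8 returns -1 early: 8 contiguous free bits
 * exist, but they cannot be used at the required alignment.
 */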
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) * pcpu_find_zero_area - modified from bitmap_find_next_zero_area_off()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) * @map: the address to base the search on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) * @size: the bitmap size in bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096) * @start: the bitnumber to start searching at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) * @nr: the number of zeroed bits we're looking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098) * @align_mask: alignment mask for zero area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) * @largest_off: offset of the largest area skipped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100) * @largest_bits: size of the largest area skipped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) * The @align_mask should be one less than a power of 2.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) *
 * This is a modified version of bitmap_find_next_zero_area_off() that
 * remembers the largest area that was skipped. This is imperfect, but is
 * generally good enough. The largest remembered region is the largest
 * failed region seen. This does not include anything we possibly skipped
 * due to alignment. pcpu_block_update_scan() scans backwards to try to
 * recover what was lost to alignment. While this can cause scanning to
 * miss earlier possible free areas, smaller allocations will eventually
 * fill those holes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) static unsigned long pcpu_find_zero_area(unsigned long *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114) unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) unsigned long nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) unsigned long align_mask,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117) unsigned long *largest_off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) unsigned long *largest_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) unsigned long index, end, i, area_off, area_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) again:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122) index = find_next_zero_bit(map, size, start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) /* Align allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125) index = __ALIGN_MASK(index, align_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) area_off = index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) end = index + nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129) if (end > size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) return end;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) i = find_next_bit(map, end, index);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) if (i < end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) area_bits = i - area_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) /* remember largest unused area with best alignment */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (area_bits > *largest_bits ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) (area_bits == *largest_bits && *largest_off &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) (!area_off || __ffs(area_off) > __ffs(*largest_off)))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) *largest_off = area_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) *largest_bits = area_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) start = i + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143) goto again;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) return index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) }
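
/*
 * Worked example (illustrative bitmap): with bits 1, 4 and 5 set in @map
 * and a call such as
 *
 *	unsigned long off = 0, bits = 0;
 *	unsigned long pos = pcpu_find_zero_area(map, 16, 0, 3, 0,
 *						&off, &bits);
 *
 * the run [0, 1) is too small (1 bit), the run [2, 4) is too small but
 * larger and so is remembered in off/bits, and pos = 6 is returned as the
 * first run that fits nr = 3. The __ffs() comparison prefers, among
 * equally sized skipped runs, the offset with the most trailing zero bits,
 * i.e. the best natural alignment.
 */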
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) * pcpu_alloc_area - allocates an area from a pcpu_chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) * @chunk: chunk of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) * @alloc_bits: size of request in allocation units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152) * @align: alignment of area (max PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) * @start: bit_off to start searching
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) *
 * This function takes in a @start offset to begin searching for a fit for
 * an allocation of @alloc_bits with alignment @align. It needs to scan the
 * allocation map because, if the allocation fits within the block's contig
 * hint, @start will be block->first_free. This is an attempt to fill the
 * allocation prior to breaking the contig hint. The allocation and
 * boundary maps are updated accordingly if it confirms a valid free area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) * Allocated addr offset in @chunk on success.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) * -1 if no matching area is found.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167) static int pcpu_alloc_area(struct pcpu_chunk *chunk, int alloc_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) size_t align, int start)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170) struct pcpu_block_md *chunk_md = &chunk->chunk_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) size_t align_mask = (align) ? (align - 1) : 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) unsigned long area_off = 0, area_bits = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173) int bit_off, end, oslot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175) lockdep_assert_held(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177) oslot = pcpu_chunk_slot(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * Search to find a fit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) end = min_t(int, start + alloc_bits + PCPU_BITMAP_BLOCK_BITS,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) pcpu_chunk_map_bits(chunk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) bit_off = pcpu_find_zero_area(chunk->alloc_map, end, start, alloc_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) align_mask, &area_off, &area_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) if (bit_off >= end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) return -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) if (area_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) pcpu_block_update_scan(chunk, area_off, area_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) /* update alloc map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193) bitmap_set(chunk->alloc_map, bit_off, alloc_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) /* update boundary map */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196) set_bit(bit_off, chunk->bound_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) bitmap_clear(chunk->bound_map, bit_off + 1, alloc_bits - 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) set_bit(bit_off + alloc_bits, chunk->bound_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) chunk->free_bytes -= alloc_bits * PCPU_MIN_ALLOC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) /* update first free bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) if (bit_off == chunk_md->first_free)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) chunk_md->first_free = find_next_zero_bit(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) chunk->alloc_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206) pcpu_chunk_map_bits(chunk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) bit_off + alloc_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) pcpu_block_update_hint_alloc(chunk, bit_off, alloc_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) pcpu_chunk_relocate(chunk, oslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213) return bit_off * PCPU_MIN_ALLOC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) }
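
/*
 * Worked example (illustrative values): an allocation of alloc_bits = 4 at
 * bit_off = 10 sets alloc_map bits [10, 14), sets bound_map bits 10 and
 * 14, and clears bound_map bits [11, 14). pcpu_free_area() can later
 * recover the size of the allocation at offset 10 * PCPU_MIN_ALLOC_SIZE by
 * finding the next bound_map bit after 10, namely 14, giving bits = 4.
 */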
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1215)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) * pcpu_free_area - frees the corresponding offset
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218) * @chunk: chunk of interest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) * @off: addr offset into chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) * This function determines the size of an allocation to free using
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) * the boundary bitmap and clears the allocation map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225) * Number of freed bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) static int pcpu_free_area(struct pcpu_chunk *chunk, int off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) struct pcpu_block_md *chunk_md = &chunk->chunk_md;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) int bit_off, bits, end, oslot, freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) lockdep_assert_held(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) pcpu_stats_area_dealloc(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235) oslot = pcpu_chunk_slot(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) bit_off = off / PCPU_MIN_ALLOC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) /* find end index */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240) end = find_next_bit(chunk->bound_map, pcpu_chunk_map_bits(chunk),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) bit_off + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) bits = end - bit_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243) bitmap_clear(chunk->alloc_map, bit_off, bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245) freed = bits * PCPU_MIN_ALLOC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) /* update metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) chunk->free_bytes += freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) /* update first free bit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251) chunk_md->first_free = min(chunk_md->first_free, bit_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) pcpu_block_update_hint_free(chunk, bit_off, bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255) pcpu_chunk_relocate(chunk, oslot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) return freed;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) static void pcpu_init_md_block(struct pcpu_block_md *block, int nr_bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) block->scan_hint = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) block->contig_hint = nr_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) block->left_free = nr_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) block->right_free = nr_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266) block->first_free = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) block->nr_bits = nr_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) static void pcpu_init_md_blocks(struct pcpu_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) struct pcpu_block_md *md_block;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) /* init the chunk's block */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) pcpu_init_md_block(&chunk->chunk_md, pcpu_chunk_map_bits(chunk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) for (md_block = chunk->md_blocks;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278) md_block != chunk->md_blocks + pcpu_chunk_nr_blocks(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) md_block++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) pcpu_init_md_block(md_block, PCPU_BITMAP_BLOCK_BITS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) * pcpu_alloc_first_chunk - creates chunks that serve the first chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) * @tmp_addr: the start of the region served
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) * @map_size: size of the region served
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287) *
 * This is responsible for creating the chunks that serve the first chunk.
 * The base_addr is @tmp_addr rounded down to the page boundary, while the
 * region end is rounded up. Offsets are tracked to determine the region
 * served. All this is done so the bitmap allocator can avoid partial
 * blocks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) * Chunk serving the region at @tmp_addr of @map_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) static struct pcpu_chunk * __init pcpu_alloc_first_chunk(unsigned long tmp_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) int map_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) struct pcpu_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) unsigned long aligned_addr, lcm_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301) int start_offset, offset_bits, region_size, region_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) size_t alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) /* region calculations */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305) aligned_addr = tmp_addr & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) start_offset = tmp_addr - aligned_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) * Align the end of the region with the LCM of PAGE_SIZE and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) * PCPU_BITMAP_BLOCK_SIZE. One of these constants is a multiple of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) * the other.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) lcm_align = lcm(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) region_size = ALIGN(start_offset + map_size, lcm_align);
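
	/*
	 * Worked example (illustrative values, PAGE_SIZE == 4096 and
	 * PCPU_BITMAP_BLOCK_SIZE == PAGE_SIZE, so lcm_align == 4096): for
	 * tmp_addr = 0x100a00 and map_size = 0x3000, aligned_addr =
	 * 0x100000, start_offset = 0xa00 and region_size =
	 * ALIGN(0x3a00, 0x1000) = 0x4000. The 0xa00 bytes before the served
	 * region and the 0x600 bytes after it (end_offset, computed below)
	 * are hidden from the bitmap allocator.
	 */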
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317) /* allocate chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) alloc_size = struct_size(chunk, populated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) BITS_TO_LONGS(region_size >> PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) chunk = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321) if (!chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325) INIT_LIST_HEAD(&chunk->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) chunk->base_addr = (void *)aligned_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) chunk->start_offset = start_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329) chunk->end_offset = region_size - chunk->start_offset - map_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) chunk->nr_pages = region_size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) region_bits = pcpu_chunk_map_bits(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) alloc_size = BITS_TO_LONGS(region_bits) * sizeof(chunk->alloc_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) chunk->alloc_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) if (!chunk->alloc_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) alloc_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) BITS_TO_LONGS(region_bits + 1) * sizeof(chunk->bound_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) chunk->bound_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) if (!chunk->bound_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) alloc_size = pcpu_chunk_nr_blocks(chunk) * sizeof(chunk->md_blocks[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348) chunk->md_blocks = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) if (!chunk->md_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) #ifdef CONFIG_MEMCG_KMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) /* first chunk isn't memcg-aware */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) chunk->obj_cgroups = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) pcpu_init_md_blocks(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359) /* manage populated page bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) chunk->immutable = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) bitmap_fill(chunk->populated, chunk->nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) chunk->nr_populated = chunk->nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) chunk->nr_empty_pop_pages = chunk->nr_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) chunk->free_bytes = map_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) if (chunk->start_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) /* hide the beginning of the bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369) offset_bits = chunk->start_offset / PCPU_MIN_ALLOC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) bitmap_set(chunk->alloc_map, 0, offset_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) set_bit(0, chunk->bound_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372) set_bit(offset_bits, chunk->bound_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) chunk->chunk_md.first_free = offset_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) pcpu_block_update_hint_alloc(chunk, 0, offset_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) if (chunk->end_offset) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380) /* hide the end of the bitmap */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) offset_bits = chunk->end_offset / PCPU_MIN_ALLOC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) bitmap_set(chunk->alloc_map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383) pcpu_chunk_map_bits(chunk) - offset_bits,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) offset_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) set_bit((start_offset + map_size) / PCPU_MIN_ALLOC_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386) chunk->bound_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) set_bit(region_bits, chunk->bound_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) pcpu_block_update_hint_alloc(chunk, pcpu_chunk_map_bits(chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) - offset_bits, offset_bits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393) return chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) static struct pcpu_chunk *pcpu_alloc_chunk(enum pcpu_chunk_type type, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) struct pcpu_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) int region_bits;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (!chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) INIT_LIST_HEAD(&chunk->list);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406) chunk->nr_pages = pcpu_unit_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) region_bits = pcpu_chunk_map_bits(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) chunk->alloc_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) sizeof(chunk->alloc_map[0]), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411) if (!chunk->alloc_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) goto alloc_map_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) chunk->bound_map = pcpu_mem_zalloc(BITS_TO_LONGS(region_bits + 1) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) sizeof(chunk->bound_map[0]), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (!chunk->bound_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) goto bound_map_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) chunk->md_blocks = pcpu_mem_zalloc(pcpu_chunk_nr_blocks(chunk) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) sizeof(chunk->md_blocks[0]), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421) if (!chunk->md_blocks)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) goto md_blocks_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) #ifdef CONFIG_MEMCG_KMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) if (pcpu_is_memcg_chunk(type)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) chunk->obj_cgroups =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) pcpu_mem_zalloc(pcpu_chunk_map_bits(chunk) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) sizeof(struct obj_cgroup *), gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429) if (!chunk->obj_cgroups)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) goto objcg_fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) pcpu_init_md_blocks(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436) /* init metadata */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) chunk->free_bytes = chunk->nr_pages * PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) return chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) #ifdef CONFIG_MEMCG_KMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) objcg_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) pcpu_mem_free(chunk->md_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445) md_blocks_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) pcpu_mem_free(chunk->bound_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) bound_map_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) pcpu_mem_free(chunk->alloc_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449) alloc_map_fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) pcpu_mem_free(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) static void pcpu_free_chunk(struct pcpu_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) if (!chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) #ifdef CONFIG_MEMCG_KMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) pcpu_mem_free(chunk->obj_cgroups);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) pcpu_mem_free(chunk->md_blocks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463) pcpu_mem_free(chunk->bound_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) pcpu_mem_free(chunk->alloc_map);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) pcpu_mem_free(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) * pcpu_chunk_populated - post-population bookkeeping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470) * @chunk: pcpu_chunk which got populated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) * @page_start: the start page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) * @page_end: the end page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) * Pages in [@page_start,@page_end) have been populated to @chunk. Update
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) * the bookkeeping information accordingly. Must be called after each
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476) * successful population.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) static void pcpu_chunk_populated(struct pcpu_chunk *chunk, int page_start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482) int page_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) int nr = page_end - page_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) lockdep_assert_held(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) bitmap_set(chunk->populated, page_start, nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) chunk->nr_populated += nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490) pcpu_nr_populated += nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) pcpu_update_empty_pages(chunk, nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496) * pcpu_chunk_depopulated - post-depopulation bookkeeping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) * @chunk: pcpu_chunk which got depopulated
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) * @page_start: the start page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) * @page_end: the end page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) * Pages in [@page_start,@page_end) have been depopulated from @chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) * Update the bookkeeping information accordingly. Must be called after
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) * each successful depopulation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) int page_start, int page_end)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508) int nr = page_end - page_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) lockdep_assert_held(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) bitmap_clear(chunk->populated, page_start, nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) chunk->nr_populated -= nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) pcpu_nr_populated -= nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) pcpu_update_empty_pages(chunk, -nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520) * Chunk management implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) * To allow different implementations, chunk alloc/free and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) * [de]population are implemented in a separate file which is pulled
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) * into this file and compiled together. The following functions
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) * should be implemented.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) * pcpu_populate_chunk - populate the specified range of a chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528) * pcpu_depopulate_chunk - depopulate the specified range of a chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) * pcpu_create_chunk - create a new chunk
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) * pcpu_destroy_chunk - destroy a chunk, always preceded by full depop
* pcpu_addr_to_page - translate an address to the corresponding struct page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) * pcpu_verify_alloc_info - check alloc_info is acceptable during init
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) static int pcpu_populate_chunk(struct pcpu_chunk *chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) int page_start, int page_end, gfp_t gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) int page_start, int page_end);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) static struct pcpu_chunk *pcpu_create_chunk(enum pcpu_chunk_type type,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539) gfp_t gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) static struct page *pcpu_addr_to_page(void *addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542) static int __init pcpu_verify_alloc_info(const struct pcpu_alloc_info *ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) #ifdef CONFIG_NEED_PER_CPU_KM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) #include "percpu-km.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) #include "percpu-vm.c"
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) * pcpu_chunk_addr_search - determine chunk containing specified address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) * @addr: address for which the chunk needs to be determined.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) * This is an internal function that handles all but static allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) * Static percpu address values should never be passed into the allocator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) * The address of the found chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) /* is it in the dynamic region (first chunk)? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) if (pcpu_addr_in_chunk(pcpu_first_chunk, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) return pcpu_first_chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566) /* is it in the reserved region? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (pcpu_addr_in_chunk(pcpu_reserved_chunk, addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return pcpu_reserved_chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) * The address is relative to unit0 which might be unused and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) * thus unmapped. Offset the address to the unit space of the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573) * current processor before looking it up in the vmalloc
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) * space. Note that any possible cpu id can be used here, so
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * there's no need to worry about preemption or cpu hotplug.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) addr += pcpu_unit_offsets[raw_smp_processor_id()];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) return pcpu_get_page_chunk(pcpu_addr_to_page(addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) #ifdef CONFIG_MEMCG_KMEM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) static enum pcpu_chunk_type pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) struct obj_cgroup **objcgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) struct obj_cgroup *objcg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) if (!memcg_kmem_enabled() || !(gfp & __GFP_ACCOUNT))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588) return PCPU_CHUNK_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) objcg = get_obj_cgroup_from_current();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591) if (!objcg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) return PCPU_CHUNK_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) if (obj_cgroup_charge(objcg, gfp, size * num_possible_cpus())) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595) obj_cgroup_put(objcg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) return PCPU_FAIL_ALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599) *objcgp = objcg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) return PCPU_CHUNK_MEMCG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604) struct pcpu_chunk *chunk, int off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) if (!objcg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) if (chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = objcg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) size * num_possible_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) obj_cgroup_uncharge(objcg, size * num_possible_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619) obj_cgroup_put(objcg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) struct obj_cgroup *objcg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) if (!pcpu_is_memcg_chunk(pcpu_chunk_type(chunk)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) objcg = chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) chunk->obj_cgroups[off >> PCPU_MIN_ALLOC_SHIFT] = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) obj_cgroup_uncharge(objcg, size * num_possible_cpus());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) mod_memcg_state(obj_cgroup_memcg(objcg), MEMCG_PERCPU_B,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) -(size * num_possible_cpus()));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640) obj_cgroup_put(objcg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) }
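
/*
 * Note on the accounting in the hooks above: a percpu allocation of
 * @size bytes is backed on every possible cpu, which is why each
 * charge, uncharge and MEMCG_PERCPU_B update is scaled by
 * num_possible_cpus().
 */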
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643) #else /* CONFIG_MEMCG_KMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) static enum pcpu_chunk_type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) pcpu_memcg_pre_alloc_hook(size_t size, gfp_t gfp, struct obj_cgroup **objcgp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) return PCPU_CHUNK_ROOT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) static void pcpu_memcg_post_alloc_hook(struct obj_cgroup *objcg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) struct pcpu_chunk *chunk, int off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) static void pcpu_memcg_free_hook(struct pcpu_chunk *chunk, int off, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659) #endif /* CONFIG_MEMCG_KMEM */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) * pcpu_alloc - the percpu allocator
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) * @size: size of area to allocate in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) * @align: alignment of area (max PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665) * @reserved: allocate from the reserved chunk if available
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) * @gfp: allocation flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * then no warning will be triggered on invalid or failed allocation
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * Percpu pointer to the allocated area on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) gfp_t pcpu_gfp;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) bool is_atomic;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) bool do_warn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) enum pcpu_chunk_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683) struct list_head *pcpu_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) struct obj_cgroup *objcg = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) static int warn_limit = 10;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) struct pcpu_chunk *chunk, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) const char *err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) int slot, off, cpu, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) void __percpu *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) size_t bits, bit_align;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) gfp = current_gfp_context(gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) /* whitelisted flags that can be passed to the backing allocators */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) pcpu_gfp = gfp & (GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) do_warn = !(gfp & __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698)
/*
* There is now a minimum allocation size of PCPU_MIN_ALLOC_SIZE,
* therefore alignment must be a minimum of that many bytes.
* An allocation may have up to PCPU_MIN_ALLOC_SIZE - 1 bytes of
* internal fragmentation from the size being rounded up.
*/
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) if (unlikely(align < PCPU_MIN_ALLOC_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) align = PCPU_MIN_ALLOC_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708) size = ALIGN(size, PCPU_MIN_ALLOC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) bits = size >> PCPU_MIN_ALLOC_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) bit_align = align >> PCPU_MIN_ALLOC_SHIFT;
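
/*
* For illustration, assuming PCPU_MIN_ALLOC_SHIFT == 2 (a 4 byte
* allocation quantum, as currently defined): a request for 6 bytes
* with align 1 becomes size = 8, bits = 2, bit_align = 1, i.e. the
* fit/alloc routines below look for two consecutive free 4-byte
* slots at any offset.
*/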
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) !is_power_of_2(align))) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) size, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) type = pcpu_memcg_pre_alloc_hook(size, gfp, &objcg);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) if (unlikely(type == PCPU_FAIL_ALLOC))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) pcpu_slot = pcpu_chunk_list(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) if (!is_atomic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * pcpu_balance_workfn() allocates memory under this mutex,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * and it may wait for memory reclaim. Allow current task
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) * to become OOM victim, in case of memory pressure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) if (gfp & __GFP_NOFAIL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) mutex_lock(&pcpu_alloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) } else if (mutex_lock_killable(&pcpu_alloc_mutex)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) spin_lock_irqsave(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) /* serve reserved allocations from the reserved chunk if available */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) if (reserved && pcpu_reserved_chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) chunk = pcpu_reserved_chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) off = pcpu_find_block_fit(chunk, bits, bit_align, is_atomic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) if (off < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) err = "alloc from reserved chunk failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) off = pcpu_alloc_area(chunk, bits, bit_align, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) if (off >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) goto area_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) err = "alloc from reserved chunk failed";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) restart:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) /* search through normal chunks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) for (slot = pcpu_size_to_slot(size); slot < pcpu_nr_slots; slot++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) list_for_each_entry_safe(chunk, next, &pcpu_slot[slot], list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) off = pcpu_find_block_fit(chunk, bits, bit_align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) is_atomic);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) if (off < 0) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) if (slot < PCPU_SLOT_FAIL_THRESHOLD)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) pcpu_chunk_move(chunk, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770) off = pcpu_alloc_area(chunk, bits, bit_align, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) if (off >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772) goto area_found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) spin_unlock_irqrestore(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) * No space left. Create a new chunk. We don't want multiple
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) * tasks to create chunks simultaneously. Serialize and create iff
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) * there's still no empty chunk after grabbing the mutex.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) if (is_atomic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) err = "atomic alloc failed, no space left";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) if (list_empty(&pcpu_slot[pcpu_nr_slots - 1])) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790) chunk = pcpu_create_chunk(type, pcpu_gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791) if (!chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) err = "failed to allocate new chunk";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) spin_lock_irqsave(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) pcpu_chunk_relocate(chunk, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) spin_lock_irqsave(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) goto restart;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) area_found:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) pcpu_stats_area_alloc(chunk, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) spin_unlock_irqrestore(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) /* populate if not all pages are already there */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809) if (!is_atomic) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) unsigned int page_start, page_end, rs, re;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) page_start = PFN_DOWN(off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813) page_end = PFN_UP(off + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815) bitmap_for_each_clear_region(chunk->populated, rs, re,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) page_start, page_end) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) WARN_ON(chunk->immutable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819) ret = pcpu_populate_chunk(chunk, rs, re, pcpu_gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) spin_lock_irqsave(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) if (ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) pcpu_free_area(chunk, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) err = "failed to populate";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825) goto fail_unlock;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) pcpu_chunk_populated(chunk, rs, re);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) spin_unlock_irqrestore(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) mutex_unlock(&pcpu_alloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) if (pcpu_nr_empty_pop_pages[type] < PCPU_EMPTY_POP_PAGES_LOW)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) pcpu_schedule_balance_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) /* clear the areas and return address relative to base address */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842) kmemleak_alloc_percpu(ptr, size, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) trace_percpu_alloc_percpu(reserved, is_atomic, size, align,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845) chunk->base_addr, off, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) pcpu_memcg_post_alloc_hook(objcg, chunk, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) return ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) fail_unlock:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) spin_unlock_irqrestore(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) if (!is_atomic && do_warn && warn_limit) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) size, align, is_atomic, err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) dump_stack();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) if (!--warn_limit)
pr_info("limit reached, disabling further warnings\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (is_atomic) {
/* see the flag handling in pcpu_balance_workfn() */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865) pcpu_atomic_alloc_failed = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) pcpu_schedule_balance_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) mutex_unlock(&pcpu_alloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) pcpu_memcg_post_alloc_hook(objcg, NULL, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * __alloc_percpu_gfp - allocate dynamic percpu area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) * @size: size of area to allocate in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) * @align: alignment of area (max PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) * @gfp: allocation flags
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) * Allocate zero-filled percpu area of @size bytes aligned at @align. If
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) * @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) * be called from any context but is a lot more likely to fail. If @gfp
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) * has __GFP_NOWARN then no warning will be triggered on invalid or failed
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) * allocation requests.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) * Percpu pointer to the allocated area on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893) return pcpu_alloc(size, align, false, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) EXPORT_SYMBOL_GPL(__alloc_percpu_gfp);
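
/*
* Minimal illustrative usage of the interface above (the variable
* names here are made up, not part of this file):
*
*	unsigned long __percpu *hits;
*	unsigned long sum = 0;
*	int cpu;
*
*	hits = __alloc_percpu_gfp(sizeof(*hits), __alignof__(*hits),
*				  GFP_KERNEL);
*	if (!hits)
*		return -ENOMEM;
*
*	this_cpu_inc(*hits);
*	for_each_possible_cpu(cpu)
*		sum += *per_cpu_ptr(hits, cpu);
*
*	free_percpu(hits);
*
* this_cpu_inc() updates only the local cpu's copy; summing requires
* walking all possible cpus with per_cpu_ptr(). Passing a non-blocking
* gfp (e.g. GFP_NOWAIT) instead would make the allocation atomic as
* described above.
*/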
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) * __alloc_percpu - allocate dynamic percpu area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * @size: size of area to allocate in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) * @align: alignment of area (max PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * Equivalent to __alloc_percpu_gfp(size, align, %GFP_KERNEL).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) void __percpu *__alloc_percpu(size_t size, size_t align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) return pcpu_alloc(size, align, false, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) EXPORT_SYMBOL_GPL(__alloc_percpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) * __alloc_reserved_percpu - allocate reserved percpu area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) * @size: size of area to allocate in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) * @align: alignment of area (max PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915) * Allocate zero-filled percpu area of @size bytes aligned at @align
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) * from reserved percpu area if arch has set it up; otherwise,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) * allocation is served from the same dynamic area. Might sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) * Might trigger writeouts.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921) * Does GFP_KERNEL allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) * Percpu pointer to the allocated area on success, NULL on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) return pcpu_alloc(size, align, true, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) }
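
/*
* Note: this is expected to be called only by the module loader while
* laying out a module's static percpu variables (see percpu_modalloc()).
*/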
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) * __pcpu_balance_workfn - manage the amount of free chunks and populated pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * @type: chunk type
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * Reclaim all fully free chunks except for the first one. This is also
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * responsible for maintaining the pool of empty populated pages. However,
* it is possible that this is called when physical memory is scarce,
* causing the OOM killer to be triggered. We should avoid doing so until
* an actual allocation causes the failure, as it is possible that
* requests can still be serviced from already backed regions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) static void __pcpu_balance_workfn(enum pcpu_chunk_type type)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) /* gfp flags passed to underlying allocators */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) const gfp_t gfp = GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) LIST_HEAD(to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947) struct list_head *pcpu_slot = pcpu_chunk_list(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) struct list_head *free_head = &pcpu_slot[pcpu_nr_slots - 1];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) struct pcpu_chunk *chunk, *next;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950) int slot, nr_to_pop, ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) /*
* There's no reason to keep around multiple unused chunks, and VM
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954) * areas can be scarce. Destroy all free chunks except for one.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) mutex_lock(&pcpu_alloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) spin_lock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) list_for_each_entry_safe(chunk, next, free_head, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) WARN_ON(chunk->immutable);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962) /* spare the first one */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) list_move(&chunk->list, &to_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) spin_unlock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) list_for_each_entry_safe(chunk, next, &to_free, list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) unsigned int rs, re;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) bitmap_for_each_set_region(chunk->populated, rs, re, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) chunk->nr_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976) pcpu_depopulate_chunk(chunk, rs, re);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) spin_lock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) pcpu_chunk_depopulated(chunk, rs, re);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979) spin_unlock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) pcpu_destroy_chunk(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982) cond_resched();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) /*
* Ensure there is a certain number of free populated pages for
* atomic allocs. Fill up from the most packed chunks so that atomic
* allocs don't increase fragmentation. If atomic allocation
* failed previously, always populate the maximum amount. This
* should prevent atomic allocs larger than PAGE_SIZE from failing
* indefinitely; however, large atomic allocs are not something we
* support properly and they can be highly unreliable and
* inefficient.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) retry_pop:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) if (pcpu_atomic_alloc_failed) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) nr_to_pop = PCPU_EMPTY_POP_PAGES_HIGH;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998) /* best effort anyway, don't worry about synchronization */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) pcpu_atomic_alloc_failed = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001) nr_to_pop = clamp(PCPU_EMPTY_POP_PAGES_HIGH -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) pcpu_nr_empty_pop_pages[type],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) 0, PCPU_EMPTY_POP_PAGES_HIGH);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) for (slot = pcpu_size_to_slot(PAGE_SIZE); slot < pcpu_nr_slots; slot++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) unsigned int nr_unpop = 0, rs, re;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) if (!nr_to_pop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) spin_lock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013) list_for_each_entry(chunk, &pcpu_slot[slot], list) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) nr_unpop = chunk->nr_pages - chunk->nr_populated;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) if (nr_unpop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) spin_unlock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) if (!nr_unpop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /* @chunk can't go away while pcpu_alloc_mutex is held */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) bitmap_for_each_clear_region(chunk->populated, rs, re, 0,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) chunk->nr_pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) int nr = min_t(int, re - rs, nr_to_pop);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) ret = pcpu_populate_chunk(chunk, rs, rs + nr, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) if (!ret) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) nr_to_pop -= nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) spin_lock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) pcpu_chunk_populated(chunk, rs, rs + nr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) spin_unlock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) nr_to_pop = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) if (!nr_to_pop)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) if (nr_to_pop) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044) /* ran out of chunks to populate, create a new one and retry */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) chunk = pcpu_create_chunk(type, gfp);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) spin_lock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048) pcpu_chunk_relocate(chunk, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) spin_unlock_irq(&pcpu_lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) goto retry_pop;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) mutex_unlock(&pcpu_alloc_mutex);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) * pcpu_balance_workfn - manage the amount of free chunks and populated pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) * @work: unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) * Call __pcpu_balance_workfn() for each chunk type.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) static void pcpu_balance_workfn(struct work_struct *work)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) enum pcpu_chunk_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) __pcpu_balance_workfn(type);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) * free_percpu - free percpu area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) * @ptr: pointer to area to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075) * Free percpu area @ptr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077) * CONTEXT:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) * Can be called from atomic context.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) void free_percpu(void __percpu *ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) struct pcpu_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) int size, off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) bool need_balance = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) struct list_head *pcpu_slot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) kmemleak_free_percpu(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094) addr = __pcpu_ptr_to_addr(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) spin_lock_irqsave(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) chunk = pcpu_chunk_addr_search(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) off = addr - chunk->base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) size = pcpu_free_area(chunk, off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) pcpu_slot = pcpu_chunk_list(pcpu_chunk_type(chunk));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) pcpu_memcg_free_hook(chunk, off, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106)
/* if there is more than one fully free chunk, wake up the grim reaper */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) if (chunk->free_bytes == pcpu_unit_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) struct pcpu_chunk *pos;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111) list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (pos != chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) need_balance = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) trace_percpu_free_percpu(chunk->base_addr, off, ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120) spin_unlock_irqrestore(&pcpu_lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122) if (need_balance)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) pcpu_schedule_balance_work();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125) EXPORT_SYMBOL_GPL(free_percpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126)
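/**
* __is_kernel_percpu_address - test whether address is from the in-kernel
* static percpu area, with translation
* @addr: address to test
* @can_addr: optional output; if @addr matches, it is translated into the
* address of the boot cpu's copy of the same percpu variable and stored
* here
*
* RETURNS:
* %true if @addr is from the in-kernel static percpu area, %false
* otherwise (always %false on UP).
*/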
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) const size_t static_size = __per_cpu_end - __per_cpu_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) void *start = per_cpu_ptr(base, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) void *va = (void *)addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) if (va >= start && va < start + static_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (can_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) *can_addr = (unsigned long) (va - start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141) *can_addr += (unsigned long)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) per_cpu_ptr(base, get_boot_cpu_id());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148) /* on UP, can't distinguish from other static vars, always false */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) * is_kernel_percpu_address - test whether address is from static percpu area
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) * @addr: address to test
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) *
* Test whether @addr belongs to the in-kernel static percpu area. Module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * static percpu areas are not considered. For those, use
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) * is_module_percpu_address().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) * %true if @addr is from in-kernel static percpu area, %false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) bool is_kernel_percpu_address(unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) return __is_kernel_percpu_address(addr, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) * per_cpu_ptr_to_phys - convert translated percpu address to physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170) * @addr: the address to be converted to physical address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) *
* Given @addr, which is a dereferenceable address obtained via one of
* the percpu access macros, this function translates it into its physical
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) * address. The caller is responsible for ensuring @addr stays valid
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175) * until this function finishes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) *
* The percpu allocator has a special setup for the first chunk, which
* currently supports either embedding in the linear address space or a
* vmalloc mapping; from the second chunk on, the backing allocator
* (currently either vm or km) provides the translation.
*
* The addr could be translated without checking whether it falls into the
* first chunk, but the current code better reflects how the percpu
* allocator actually works, and the verification can catch bugs both in
* the percpu allocator itself and in per_cpu_ptr_to_phys() callers. So we
* keep the current code.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) * The physical address for @addr.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) phys_addr_t per_cpu_ptr_to_phys(void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) bool in_first_chunk = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) unsigned long first_low, first_high;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) * The following test on unit_low/high isn't strictly
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) * necessary but will speed up lookups of addresses which
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201) * aren't in the first chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) * The address check is against full chunk sizes. pcpu_base_addr
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) * points to the beginning of the first chunk including the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) * static region. Assumes good intent as the first chunk may
* not be full (i.e. < pcpu_unit_pages in size).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) first_low = (unsigned long)pcpu_base_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) pcpu_unit_page_offset(pcpu_low_unit_cpu, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) first_high = (unsigned long)pcpu_base_addr +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211) pcpu_unit_page_offset(pcpu_high_unit_cpu, pcpu_unit_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if ((unsigned long)addr >= first_low &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) (unsigned long)addr < first_high) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) void *start = per_cpu_ptr(base, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) if (addr >= start && addr < start + pcpu_unit_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) in_first_chunk = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) if (in_first_chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) if (!is_vmalloc_addr(addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return __pa(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228) return page_to_phys(vmalloc_to_page(addr)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) offset_in_page(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231) return page_to_phys(pcpu_addr_to_page(addr)) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) offset_in_page(addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) EXPORT_SYMBOL_GPL(per_cpu_ptr_to_phys);
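
/*
* Illustrative use of the interface above (the names "buf" and "cpu" are
* made up): obtaining the physical address of one cpu's part of a dynamic
* percpu allocation, e.g. for handing a per-cpu buffer to hardware:
*
*	u32 __percpu *buf = alloc_percpu(u32);
*	phys_addr_t phys = per_cpu_ptr_to_phys(per_cpu_ptr(buf, cpu));
*
* Note that the argument is the translated, dereferenceable address from
* per_cpu_ptr()/this_cpu_ptr(), not the raw __percpu pointer itself.
*/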
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) * pcpu_alloc_alloc_info - allocate percpu allocation info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238) * @nr_groups: the number of groups
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) * @nr_units: the number of units
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) * Allocate ai which is large enough for @nr_groups groups containing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242) * @nr_units units. The returned ai's groups[0].cpu_map points to the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) * cpu_map array which is long enough for @nr_units and filled with
* NR_CPUS. It's the caller's responsibility to initialize the cpu_map
* pointers of the other groups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) * Pointer to the allocated pcpu_alloc_info on success, NULL on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) * failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252) int nr_units)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254) struct pcpu_alloc_info *ai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) size_t base_size, ai_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) int unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) base_size = ALIGN(struct_size(ai, groups, nr_groups),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) __alignof__(ai->groups[0].cpu_map[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261) ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) ptr = memblock_alloc(PFN_ALIGN(ai_size), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) if (!ptr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266) ai = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) ptr += base_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) ai->groups[0].cpu_map = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) for (unit = 0; unit < nr_units; unit++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) ai->groups[0].cpu_map[unit] = NR_CPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274) ai->nr_groups = nr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) ai->__ai_size = PFN_ALIGN(ai_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) return ai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) }
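
/*
 * Illustrative sketch of the contract documented above: with two
 * groups of two units each, the flat cpu_map is sliced like so
 * (pcpu_build_alloc_info() below does exactly this kind of slicing):
 *
 *	ai = pcpu_alloc_alloc_info(2, 4);
 *	ai->groups[1].cpu_map = ai->groups[0].cpu_map + 2;
 */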
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281) * pcpu_free_alloc_info - free percpu allocation info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) * @ai: pcpu_alloc_info to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) * Free @ai which was allocated by pcpu_alloc_alloc_info().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288) memblock_free_early(__pa(ai), ai->__ai_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) * pcpu_dump_alloc_info - print out information about pcpu_alloc_info
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293) * @lvl: loglevel
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) * @ai: allocation info to dump
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) * Print out information about @ai using loglevel @lvl.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298) static void pcpu_dump_alloc_info(const char *lvl,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) const struct pcpu_alloc_info *ai)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) int group_width = 1, cpu_width = 1, width;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) char empty_str[] = "--------";
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) int alloc = 0, alloc_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) int group, v;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) int upa, apl; /* units per alloc, allocs per line */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) v = ai->nr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) while (v /= 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) group_width++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) v = num_possible_cpus();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) while (v /= 10)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313) cpu_width++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) empty_str[min_t(int, cpu_width, sizeof(empty_str) - 1)] = '\0';
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) upa = ai->alloc_size / ai->unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) width = upa * (cpu_width + 1) + group_width + 3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) apl = rounddown_pow_of_two(max(60 / width, 1));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) printk("%spcpu-alloc: s%zu r%zu d%zu u%zu alloc=%zu*%zu",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321) lvl, ai->static_size, ai->reserved_size, ai->dyn_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) ai->unit_size, ai->alloc_size / ai->atom_size, ai->atom_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) for (group = 0; group < ai->nr_groups; group++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) const struct pcpu_group_info *gi = &ai->groups[group];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) int unit = 0, unit_end = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2328) BUG_ON(gi->nr_units % upa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2329) for (alloc_end += gi->nr_units / upa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2330) alloc < alloc_end; alloc++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2331) if (!(alloc % apl)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2332) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2333) printk("%spcpu-alloc: ", lvl);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2334) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2335) pr_cont("[%0*d] ", group_width, group);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2336)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2337) for (unit_end += upa; unit < unit_end; unit++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2338) if (gi->cpu_map[unit] != NR_CPUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2339) pr_cont("%0*d ",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2340) cpu_width, gi->cpu_map[unit]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2341) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2342) pr_cont("%s ", empty_str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2344) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2345) pr_cont("\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2346) }
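
/*
 * For reference, the dump above yields dmesg lines of roughly this
 * shape (values are illustrative):
 *
 *	pcpu-alloc: s145408 r8192 d262144 u524288 alloc=1*2097152
 *	pcpu-alloc: [0] 0 1 2 3 [1] 4 5 6 7
 *
 * i.e. the static/reserved/dynamic/unit sizes and allocation shape,
 * followed by each allocation's group index and unit->cpu assignments.
 */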
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2347)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2348) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2349) * pcpu_setup_first_chunk - initialize the first percpu chunk
 * @ai: pcpu_alloc_info describing how the percpu area is shaped
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2351) * @base_addr: mapped address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2352) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2353) * Initialize the first percpu chunk which contains the kernel static
 * percpu area.  This function is to be called from the arch percpu
 * area setup path.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2356) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2357) * @ai contains all information necessary to initialize the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2358) * chunk and prime the dynamic percpu allocator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2359) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2360) * @ai->static_size is the size of static percpu area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2361) *
 * @ai->reserved_size, if non-zero, specifies the number of bytes to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2363) * reserve after the static area in the first chunk. This reserves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2364) * the first chunk such that it's available only through reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2365) * percpu allocation. This is primarily used to serve module percpu
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2366) * static areas on architectures where the addressing model has
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2367) * limited offset range for symbol relocations to guarantee module
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2368) * percpu symbols fall inside the relocatable range.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2369) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2370) * @ai->dyn_size determines the number of bytes available for dynamic
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2371) * allocation in the first chunk. The area between @ai->static_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2372) * @ai->reserved_size + @ai->dyn_size and @ai->unit_size is unused.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2373) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2374) * @ai->unit_size specifies unit size and must be aligned to PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2375) * and equal to or larger than @ai->static_size + @ai->reserved_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2376) * @ai->dyn_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2377) *
 * @ai->atom_size is the allocation atom size and is used as the
 * alignment for vm areas.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2380) *
 * @ai->alloc_size is the allocation size and is always a multiple of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2382) * @ai->atom_size. This is larger than @ai->atom_size if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2383) * @ai->unit_size is larger than @ai->atom_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2384) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2385) * @ai->nr_groups and @ai->groups describe virtual memory layout of
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2386) * percpu areas. Units which should be colocated are put into the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2387) * same group. Dynamic VM areas will be allocated according to these
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2388) * groupings. If @ai->nr_groups is zero, a single group containing
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2389) * all units is assumed.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2390) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2391) * The caller should have mapped the first chunk at @base_addr and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2392) * copied static data to each unit.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2393) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2394) * The first chunk will always contain a static and a dynamic region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2395) * However, the static region is not managed by any chunk. If the first
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2396) * chunk also contains a reserved region, it is served by two chunks -
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2397) * one for the reserved region and one for the dynamic region. They
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2398) * share the same vm, but use offset regions in the area allocation map.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2399) * The chunk serving the dynamic region is circulated in the chunk slots
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2400) * and available for dynamic allocation like any other chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2401) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2402) void __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2403) void *base_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2404) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2405) size_t size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2406) size_t static_size, dyn_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2407) struct pcpu_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2408) unsigned long *group_offsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2409) size_t *group_sizes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2410) unsigned long *unit_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2411) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2412) int *unit_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2413) int group, unit, i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2414) int map_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2415) unsigned long tmp_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2416) size_t alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2417) enum pcpu_chunk_type type;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2418)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2419) #define PCPU_SETUP_BUG_ON(cond) do { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2420) if (unlikely(cond)) { \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2421) pr_emerg("failed to initialize, %s\n", #cond); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2422) pr_emerg("cpu_possible_mask=%*pb\n", \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2423) cpumask_pr_args(cpu_possible_mask)); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2424) pcpu_dump_alloc_info(KERN_EMERG, ai); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2425) BUG(); \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2426) } \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2427) } while (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2428)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2429) /* sanity checks */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2430) PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2431) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2432) PCPU_SETUP_BUG_ON(!ai->static_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2433) PCPU_SETUP_BUG_ON(offset_in_page(__per_cpu_start));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2434) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2435) PCPU_SETUP_BUG_ON(!base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2436) PCPU_SETUP_BUG_ON(offset_in_page(base_addr));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2437) PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2438) PCPU_SETUP_BUG_ON(offset_in_page(ai->unit_size));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2439) PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2440) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->unit_size, PCPU_BITMAP_BLOCK_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2441) PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2442) PCPU_SETUP_BUG_ON(!ai->dyn_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2443) PCPU_SETUP_BUG_ON(!IS_ALIGNED(ai->reserved_size, PCPU_MIN_ALLOC_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2444) PCPU_SETUP_BUG_ON(!(IS_ALIGNED(PCPU_BITMAP_BLOCK_SIZE, PAGE_SIZE) ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2445) IS_ALIGNED(PAGE_SIZE, PCPU_BITMAP_BLOCK_SIZE)));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2446) PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2447)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2448) /* process group information and build config tables accordingly */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2449) alloc_size = ai->nr_groups * sizeof(group_offsets[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2450) group_offsets = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2451) if (!group_offsets)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2452) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2453) alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2454)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2455) alloc_size = ai->nr_groups * sizeof(group_sizes[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2456) group_sizes = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2457) if (!group_sizes)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2458) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2459) alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2460)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2461) alloc_size = nr_cpu_ids * sizeof(unit_map[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2462) unit_map = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2463) if (!unit_map)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2464) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2465) alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2466)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2467) alloc_size = nr_cpu_ids * sizeof(unit_off[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2468) unit_off = memblock_alloc(alloc_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2469) if (!unit_off)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2470) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2471) alloc_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2472)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2473) for (cpu = 0; cpu < nr_cpu_ids; cpu++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2474) unit_map[cpu] = UINT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2475)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2476) pcpu_low_unit_cpu = NR_CPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2477) pcpu_high_unit_cpu = NR_CPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2478)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2479) for (group = 0, unit = 0; group < ai->nr_groups; group++, unit += i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2480) const struct pcpu_group_info *gi = &ai->groups[group];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2481)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2482) group_offsets[group] = gi->base_offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2483) group_sizes[group] = gi->nr_units * ai->unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2484)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2485) for (i = 0; i < gi->nr_units; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2486) cpu = gi->cpu_map[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2487) if (cpu == NR_CPUS)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2488) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2489)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2490) PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2491) PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2492) PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2494) unit_map[cpu] = unit + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2495) unit_off[cpu] = gi->base_offset + i * ai->unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2497) /* determine low/high unit_cpu */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2498) if (pcpu_low_unit_cpu == NR_CPUS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2499) unit_off[cpu] < unit_off[pcpu_low_unit_cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2500) pcpu_low_unit_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2501) if (pcpu_high_unit_cpu == NR_CPUS ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2502) unit_off[cpu] > unit_off[pcpu_high_unit_cpu])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2503) pcpu_high_unit_cpu = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2504) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2505) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2506) pcpu_nr_units = unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2507)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2508) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2509) PCPU_SETUP_BUG_ON(unit_map[cpu] == UINT_MAX);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2510)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2511) /* we're done parsing the input, undefine BUG macro and dump config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2512) #undef PCPU_SETUP_BUG_ON
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2513) pcpu_dump_alloc_info(KERN_DEBUG, ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2514)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2515) pcpu_nr_groups = ai->nr_groups;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2516) pcpu_group_offsets = group_offsets;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2517) pcpu_group_sizes = group_sizes;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2518) pcpu_unit_map = unit_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2519) pcpu_unit_offsets = unit_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2521) /* determine basic parameters */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2522) pcpu_unit_pages = ai->unit_size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2523) pcpu_unit_size = pcpu_unit_pages << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2524) pcpu_atom_size = ai->atom_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2525) pcpu_chunk_struct_size = struct_size(chunk, populated,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2526) BITS_TO_LONGS(pcpu_unit_pages));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2527)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2528) pcpu_stats_save_ai(ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2529)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2530) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2531) * Allocate chunk slots. The additional last slot is for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2532) * empty chunks.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2533) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2534) pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2535) pcpu_chunk_lists = memblock_alloc(pcpu_nr_slots *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2536) sizeof(pcpu_chunk_lists[0]) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2537) PCPU_NR_CHUNK_TYPES,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2538) SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2539) if (!pcpu_chunk_lists)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2540) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2541) pcpu_nr_slots * sizeof(pcpu_chunk_lists[0]) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2542) PCPU_NR_CHUNK_TYPES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2543)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2544) for (type = 0; type < PCPU_NR_CHUNK_TYPES; type++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2545) for (i = 0; i < pcpu_nr_slots; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2546) INIT_LIST_HEAD(&pcpu_chunk_list(type)[i]);
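
	/*
	 * The lists form one row of pcpu_nr_slots slots per chunk type
	 * (root and, when memcg accounting is enabled, memcg-aware),
	 * with pcpu_chunk_list(type) selecting the row as done above.
	 */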
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2547)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2548) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2549) * The end of the static region needs to be aligned with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2550) * minimum allocation size as this offsets the reserved and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2551) * dynamic region. The first chunk ends page aligned by
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2552) * expanding the dynamic region, therefore the dynamic region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2553) * can be shrunk to compensate while still staying above the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2554) * configured sizes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2555) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2556) static_size = ALIGN(ai->static_size, PCPU_MIN_ALLOC_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2557) dyn_size = ai->dyn_size - (static_size - ai->static_size);
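
	/*
	 * Worked example (illustrative numbers): were ai->static_size
	 * 0x1e0d with PCPU_MIN_ALLOC_SIZE == 4, static_size would round
	 * up to 0x1e10 and dyn_size would give up the 3 padding bytes.
	 */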
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2559) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2560) * Initialize first chunk.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2561) * If the reserved_size is non-zero, this initializes the reserved
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2562) * chunk. If the reserved_size is zero, the reserved chunk is NULL
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2563) * and the dynamic region is initialized here. The first chunk,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2564) * pcpu_first_chunk, will always point to the chunk that serves
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2565) * the dynamic region.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2566) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2567) tmp_addr = (unsigned long)base_addr + static_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2568) map_size = ai->reserved_size ?: dyn_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2569) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2570)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2571) /* init dynamic chunk if necessary */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2572) if (ai->reserved_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2573) pcpu_reserved_chunk = chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2574)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2575) tmp_addr = (unsigned long)base_addr + static_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2576) ai->reserved_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2577) map_size = dyn_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2578) chunk = pcpu_alloc_first_chunk(tmp_addr, map_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2579) }
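
	/*
	 * At this point, pcpu_reserved_chunk (if any) serves the
	 * reserved region and "chunk" serves the dynamic region, both
	 * carved out of the same mapping at base_addr.
	 */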
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2580)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2581) /* link the first chunk in */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2582) pcpu_first_chunk = chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2583) pcpu_nr_empty_pop_pages[PCPU_CHUNK_ROOT] = pcpu_first_chunk->nr_empty_pop_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2584) pcpu_chunk_relocate(pcpu_first_chunk, -1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2585)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2586) /* include all regions of the first chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2587) pcpu_nr_populated += PFN_DOWN(size_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2589) pcpu_stats_chunk_alloc();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2590) trace_percpu_create_chunk(base_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2592) /* we're done */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2593) pcpu_base_addr = base_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2594) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2596) #ifdef CONFIG_SMP
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2597)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2598) const char * const pcpu_fc_names[PCPU_FC_NR] __initconst = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2599) [PCPU_FC_AUTO] = "auto",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2600) [PCPU_FC_EMBED] = "embed",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2601) [PCPU_FC_PAGE] = "page",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2602) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2603)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2604) enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2605)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2606) static int __init percpu_alloc_setup(char *str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2607) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2608) if (!str)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2609) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2611) if (0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2612) /* nada */;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2613) #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2614) else if (!strcmp(str, "embed"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2615) pcpu_chosen_fc = PCPU_FC_EMBED;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2616) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2617) #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2618) else if (!strcmp(str, "page"))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2619) pcpu_chosen_fc = PCPU_FC_PAGE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2620) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2621) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2622) pr_warn("unknown allocator %s specified\n", str);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2623)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2624) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2625) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2626) early_param("percpu_alloc", percpu_alloc_setup);
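
/*
 * e.g. booting with "percpu_alloc=page" on the kernel command line
 * selects the page first chunk allocator on arches that support it;
 * an unrecognized value hits the warning above and leaves
 * pcpu_chosen_fc at PCPU_FC_AUTO.
 */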
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2627)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2628) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2629) * pcpu_embed_first_chunk() is used by the generic percpu setup.
 * Build it if the arch config needs it or the generic percpu setup is
 * going to be used.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2632) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2633) #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2634) !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2635) #define BUILD_EMBED_FIRST_CHUNK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2636) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2637)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2638) /* build pcpu_page_first_chunk() iff needed by the arch config */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2639) #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2640) #define BUILD_PAGE_FIRST_CHUNK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2641) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2642)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2643) /* pcpu_build_alloc_info() is used by both embed and page first chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2644) #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2645) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2646) * pcpu_build_alloc_info - build alloc_info considering distances between CPUs
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2647) * @reserved_size: the size of reserved percpu area in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2648) * @dyn_size: minimum free size for dynamic allocation in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2649) * @atom_size: allocation atom size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2650) * @cpu_distance_fn: callback to determine distance between cpus, optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2651) *
 * This function determines the grouping of units, their mappings to
 * cpus and other parameters considering the needed percpu size,
 * allocation atom size and the distances between CPUs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2655) *
 * Groups are always multiples of atom size, and CPUs which are within
 * LOCAL_DISTANCE of each other both ways are grouped together and
 * share space for units in the same group.  The returned configuration
 * is guaranteed to have CPUs on different nodes in different groups
 * and >=75% usage of the allocated virtual address space.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2661) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2662) * RETURNS:
 * On success, a pointer to the new alloc_info is returned.  On
 * failure, an ERR_PTR value is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2665) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2666) static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2667) size_t reserved_size, size_t dyn_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2668) size_t atom_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2669) pcpu_fc_cpu_distance_fn_t cpu_distance_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2670) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2671) static int group_map[NR_CPUS] __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2672) static int group_cnt[NR_CPUS] __initdata;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2673) const size_t static_size = __per_cpu_end - __per_cpu_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2674) int nr_groups = 1, nr_units = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2675) size_t size_sum, min_unit_size, alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2676) int upa, max_upa, best_upa; /* units_per_alloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2677) int last_allocs, group, unit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2678) unsigned int cpu, tcpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2679) struct pcpu_alloc_info *ai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2680) unsigned int *cpu_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2681)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2682) /* this function may be called multiple times */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2683) memset(group_map, 0, sizeof(group_map));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2684) memset(group_cnt, 0, sizeof(group_cnt));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2685)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2686) /* calculate size_sum and ensure dyn_size is enough for early alloc */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2687) size_sum = PFN_ALIGN(static_size + reserved_size +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2688) max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2689) dyn_size = size_sum - static_size - reserved_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2690)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2691) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2692) * Determine min_unit_size, alloc_size and max_upa such that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2693) * alloc_size is multiple of atom_size and is the smallest
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2694) * which can accommodate 4k aligned segments which are equal to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2695) * or larger than min_unit_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2696) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2697) min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2699) /* determine the maximum # of units that can fit in an allocation */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2700) alloc_size = roundup(min_unit_size, atom_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2701) upa = alloc_size / min_unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2702) while (alloc_size % upa || (offset_in_page(alloc_size / upa)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2703) upa--;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2704) max_upa = upa;
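
	/*
	 * Worked example (illustrative): with atom_size = 2MB and
	 * min_unit_size = 80KB, alloc_size = 2MB and upa starts at 25.
	 * 2MB splits into equal page-aligned units only at powers of
	 * two, so the loop above settles on max_upa = 16 (128KB units).
	 */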
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2705)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2706) /* group cpus according to their proximity */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2707) for_each_possible_cpu(cpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2708) group = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2709) next_group:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2710) for_each_possible_cpu(tcpu) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2711) if (cpu == tcpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2712) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2713) if (group_map[tcpu] == group && cpu_distance_fn &&
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2714) (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2715) cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2716) group++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2717) nr_groups = max(nr_groups, group + 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2718) goto next_group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2719) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2720) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2721) group_map[cpu] = group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2722) group_cnt[group]++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2723) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2724)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2725) /*
	 * Wasted space is caused by a ratio imbalance of upa to group_cnt.
	 * Decrease upa (i.e. expand the unit_size) until we use >= 75% of
	 * the units allocated.  This is related to atom_size, which could
	 * be much larger than the unit_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2729) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2730) last_allocs = INT_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2731) for (upa = max_upa; upa; upa--) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2732) int allocs = 0, wasted = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2733)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2734) if (alloc_size % upa || (offset_in_page(alloc_size / upa)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2735) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2736)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2737) for (group = 0; group < nr_groups; group++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2738) int this_allocs = DIV_ROUND_UP(group_cnt[group], upa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2739) allocs += this_allocs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2740) wasted += this_allocs * upa - group_cnt[group];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2741) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2742)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2743) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2744) * Don't accept if wastage is over 1/3. The
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2745) * greater-than comparison ensures upa==1 always
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2746) * passes the following check.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2747) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2748) if (wasted > num_possible_cpus() / 3)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2749) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2750)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2751) /* and then don't consume more memory */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2752) if (allocs > last_allocs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2753) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2754) last_allocs = allocs;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2755) best_upa = upa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2756) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2757) upa = best_upa;
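
	/*
	 * Illustrative pass of the loop above: two groups of 3 CPUs at
	 * upa == 4 need one allocation each, wasting 2 of the 8 units;
	 * 2 is not over num_possible_cpus() / 3 == 2, so upa == 4 is an
	 * acceptable candidate.
	 */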
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2758)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2759) /* allocate and fill alloc_info */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2760) for (group = 0; group < nr_groups; group++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2761) nr_units += roundup(group_cnt[group], upa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2762)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2763) ai = pcpu_alloc_alloc_info(nr_groups, nr_units);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2764) if (!ai)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2765) return ERR_PTR(-ENOMEM);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2766) cpu_map = ai->groups[0].cpu_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2768) for (group = 0; group < nr_groups; group++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2769) ai->groups[group].cpu_map = cpu_map;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2770) cpu_map += roundup(group_cnt[group], upa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2773) ai->static_size = static_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2774) ai->reserved_size = reserved_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2775) ai->dyn_size = dyn_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2776) ai->unit_size = alloc_size / upa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2777) ai->atom_size = atom_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2778) ai->alloc_size = alloc_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2779)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2780) for (group = 0, unit = 0; group < nr_groups; group++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2781) struct pcpu_group_info *gi = &ai->groups[group];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2782)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2783) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2784) * Initialize base_offset as if all groups are located
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2785) * back-to-back. The caller should update this to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2786) * reflect actual allocation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2787) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2788) gi->base_offset = unit * ai->unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2789)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2790) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2791) if (group_map[cpu] == group)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2792) gi->cpu_map[gi->nr_units++] = cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2793) gi->nr_units = roundup(gi->nr_units, upa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2794) unit += gi->nr_units;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2795) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2796) BUG_ON(unit != nr_units);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2797)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2798) return ai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2799) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2800) #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2801)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2802) #if defined(BUILD_EMBED_FIRST_CHUNK)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2803) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2804) * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2805) * @reserved_size: the size of reserved percpu area in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2806) * @dyn_size: minimum free size for dynamic allocation in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2807) * @atom_size: allocation atom size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2808) * @cpu_distance_fn: callback to determine distance between cpus, optional
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2809) * @alloc_fn: function to allocate percpu page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2810) * @free_fn: function to free percpu page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2811) *
 * This is a helper to ease setting up the embedded first percpu chunk
 * and can be called where pcpu_setup_first_chunk() is expected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2814) *
 * If this function is used to set up the first chunk, it is allocated
 * by calling @alloc_fn and used as-is without being mapped into the
 * vmalloc area.  Allocations are always whole multiples of @atom_size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2818) * aligned to @atom_size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2819) *
 * This enables the first chunk to piggy back on the linear physical
 * mapping which often uses a larger page size.  Please note that this
 * can result in a very sparse cpu->unit mapping on NUMA machines, thus
 * requiring a large vmalloc address space.  Don't use this allocator if
 * vmalloc space is not orders of magnitude larger than the distances
 * between node memory addresses (i.e. 32-bit NUMA machines).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2826) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2827) * @dyn_size specifies the minimum dynamic area size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2828) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2829) * If the needed size is smaller than the minimum or specified unit
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2830) * size, the leftover is returned using @free_fn.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2831) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2832) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2833) * 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2834) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2835) int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2836) size_t atom_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2837) pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2838) pcpu_fc_alloc_fn_t alloc_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2839) pcpu_fc_free_fn_t free_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2841) void *base = (void *)ULONG_MAX;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2842) void **areas = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2843) struct pcpu_alloc_info *ai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2844) size_t size_sum, areas_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2845) unsigned long max_distance;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2846) int group, i, highest_group, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2847)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2848) ai = pcpu_build_alloc_info(reserved_size, dyn_size, atom_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2849) cpu_distance_fn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2850) if (IS_ERR(ai))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2851) return PTR_ERR(ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2852)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2853) size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2854) areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2855)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2856) areas = memblock_alloc(areas_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2857) if (!areas) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2858) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2859) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2860) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2861)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2862) /* allocate, copy and determine base address & max_distance */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2863) highest_group = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2864) for (group = 0; group < ai->nr_groups; group++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2865) struct pcpu_group_info *gi = &ai->groups[group];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2866) unsigned int cpu = NR_CPUS;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2867) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2869) for (i = 0; i < gi->nr_units && cpu == NR_CPUS; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2870) cpu = gi->cpu_map[i];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2871) BUG_ON(cpu == NR_CPUS);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2872)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2873) /* allocate space for the whole group */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2874) ptr = alloc_fn(cpu, gi->nr_units * ai->unit_size, atom_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2875) if (!ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2876) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2877) goto out_free_areas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2878) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2879) /* kmemleak tracks the percpu allocations separately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2880) kmemleak_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2881) areas[group] = ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2882)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2883) base = min(ptr, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2884) if (ptr > areas[highest_group])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2885) highest_group = group;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2886) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2887) max_distance = areas[highest_group] - base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2888) max_distance += ai->unit_size * ai->groups[highest_group].nr_units;
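
	/*
	 * e.g. (illustrative): group 0 at base and group 1 allocated
	 * 1GB higher with four 512KB units gives a max_distance of
	 * 1GB + 2MB, the span the vmalloc area must be able to cover.
	 */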
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2889)
	/* warn if maximum distance is more than 75% of vmalloc space */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2891) if (max_distance > VMALLOC_TOTAL * 3 / 4) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2892) pr_warn("max_distance=0x%lx too large for vmalloc space 0x%lx\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2893) max_distance, VMALLOC_TOTAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2894) #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2895) /* and fail if we have fallback */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2896) rc = -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2897) goto out_free_areas;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2898) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2900)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2901) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2902) * Copy data and free unused parts. This should happen after all
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2903) * allocations are complete; otherwise, we may end up with
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2904) * overlapping groups.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2905) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2906) for (group = 0; group < ai->nr_groups; group++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2907) struct pcpu_group_info *gi = &ai->groups[group];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2908) void *ptr = areas[group];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2909)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2910) for (i = 0; i < gi->nr_units; i++, ptr += ai->unit_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2911) if (gi->cpu_map[i] == NR_CPUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2912) /* unused unit, free whole */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2913) free_fn(ptr, ai->unit_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2914) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2915) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2916) /* copy and return the unused part */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2917) memcpy(ptr, __per_cpu_load, ai->static_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2918) free_fn(ptr + size_sum, ai->unit_size - size_sum);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2919) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2920) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2922) /* base address is now known, determine group base offsets */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2923) for (group = 0; group < ai->nr_groups; group++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2924) ai->groups[group].base_offset = areas[group] - base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2925) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2926)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2927) pr_info("Embedded %zu pages/cpu s%zu r%zu d%zu u%zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2928) PFN_DOWN(size_sum), ai->static_size, ai->reserved_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2929) ai->dyn_size, ai->unit_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2930)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2931) pcpu_setup_first_chunk(ai, base);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2932) goto out_free;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2933)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2934) out_free_areas:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2935) for (group = 0; group < ai->nr_groups; group++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2936) if (areas[group])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2937) free_fn(areas[group],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2938) ai->groups[group].nr_units * ai->unit_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2939) out_free:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2940) pcpu_free_alloc_info(ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2941) if (areas)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2942) memblock_free_early(__pa(areas), areas_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2943) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2944) }
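
/*
 * Sketch of a typical call site (modeled on arch setup code; the
 * pcpu_cpu_distance/pcpu_fc_alloc/pcpu_fc_free helpers are the arch's
 * own, shown here only for illustration):
 *
 *	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
 *				    PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
 *				    pcpu_cpu_distance,
 *				    pcpu_fc_alloc, pcpu_fc_free);
 */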
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2945) #endif /* BUILD_EMBED_FIRST_CHUNK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2946)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2947) #ifdef BUILD_PAGE_FIRST_CHUNK
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2948) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2949) * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2950) * @reserved_size: the size of reserved percpu area in bytes
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2951) * @alloc_fn: function to allocate percpu page, always called with PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2952) * @free_fn: function to free percpu page, always called with PAGE_SIZE
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2953) * @populate_pte_fn: function to populate pte
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2954) *
 * This is a helper to ease setting up a page-remapped first percpu
 * chunk and can be called where pcpu_setup_first_chunk() is expected.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2957) *
 * This is the basic allocator.  The static percpu area is allocated
 * page-by-page into the vmalloc area.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2960) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2961) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2962) * 0 on success, -errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2963) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2964) int __init pcpu_page_first_chunk(size_t reserved_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2965) pcpu_fc_alloc_fn_t alloc_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2966) pcpu_fc_free_fn_t free_fn,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2967) pcpu_fc_populate_pte_fn_t populate_pte_fn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2968) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2969) static struct vm_struct vm;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2970) struct pcpu_alloc_info *ai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2971) char psize_str[16];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2972) int unit_pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2973) size_t pages_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2974) struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2975) int unit, i, j, rc = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2976) int upa;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2977) int nr_g0_units;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2978)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2979) snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2980)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2981) ai = pcpu_build_alloc_info(reserved_size, 0, PAGE_SIZE, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2982) if (IS_ERR(ai))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2983) return PTR_ERR(ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2984) BUG_ON(ai->nr_groups != 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2985) upa = ai->alloc_size/ai->unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2986) nr_g0_units = roundup(num_possible_cpus(), upa);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2987) if (WARN_ON(ai->groups[0].nr_units != nr_g0_units)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2988) pcpu_free_alloc_info(ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2989) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2990) }
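
	/*
	 * Note: with @atom_size == PAGE_SIZE, alloc_size rounds
	 * min_unit_size up to a page and upa works out to 1, so
	 * nr_g0_units is simply num_possible_cpus().
	 */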
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2991)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2992) unit_pages = ai->unit_size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2994) /* unaligned allocations can't be freed, round up to page size */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2995) pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2996) sizeof(pages[0]));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2997) pages = memblock_alloc(pages_size, SMP_CACHE_BYTES);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2998) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2999) panic("%s: Failed to allocate %zu bytes\n", __func__,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3000) pages_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3002) /* allocate pages */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3003) j = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3004) for (unit = 0; unit < num_possible_cpus(); unit++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3005) unsigned int cpu = ai->groups[0].cpu_map[unit];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3006) for (i = 0; i < unit_pages; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3007) void *ptr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3008)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3009) ptr = alloc_fn(cpu, PAGE_SIZE, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3010) if (!ptr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3011) pr_warn("failed to allocate %s page for cpu%u\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3012) psize_str, cpu);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3013) goto enomem;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3014) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3015) /* kmemleak tracks the percpu allocations separately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3016) kmemleak_free(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3017) pages[j++] = virt_to_page(ptr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3018) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3019) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3020)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3021) /* allocate vm area, map the pages and copy static data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3022) vm.flags = VM_ALLOC;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3023) vm.size = num_possible_cpus() * ai->unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3024) vm_area_register_early(&vm, PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3025)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3026) for (unit = 0; unit < num_possible_cpus(); unit++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3027) unsigned long unit_addr =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3028) (unsigned long)vm.addr + unit * ai->unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3029)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3030) for (i = 0; i < unit_pages; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3031) populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3032)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3033) /* pte already populated, the following shouldn't fail */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3034) rc = __pcpu_map_pages(unit_addr, &pages[unit * unit_pages],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3035) unit_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3036) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3037) panic("failed to map percpu area, err=%d\n", rc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3038)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3039) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3040) * FIXME: Archs with virtual cache should flush local
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3041) * cache for the linear mapping here - something
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3042) * equivalent to flush_cache_vmap() on the local cpu.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3043) * flush_cache_vmap() can't be used as most supporting
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3044) * data structures are not set up yet.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3045) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3046)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3047) /* copy static data */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3048) memcpy((void *)unit_addr, __per_cpu_load, ai->static_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3049) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3050)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3051) /* we're ready, commit */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3052) pr_info("%d %s pages/cpu s%zu r%zu d%zu\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3053) unit_pages, psize_str, ai->static_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3054) ai->reserved_size, ai->dyn_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3055)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3056) pcpu_setup_first_chunk(ai, vm.addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3057) goto out_free_ar;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3058)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3059) enomem:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3060) while (--j >= 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3061) free_fn(page_address(pages[j]), PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3062) rc = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3063) out_free_ar:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3064) memblock_free_early(__pa(pages), pages_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3065) pcpu_free_alloc_info(ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3066) return rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3067) }
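
/*
 * Illustrative sketch, not compiled: roughly how an arch that selects
 * CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK might drive the helper above.
 * The example_* callbacks are hypothetical; a real arch (e.g. x86)
 * substitutes NUMA-aware allocation and its own pagetable population.
 */
#if 0	/* example only */
static void * __init example_fc_alloc(unsigned int cpu, size_t size,
				      size_t align)
{
	/* early boot: hand out boot memory, ignoring NUMA locality */
	return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
}

static void __init example_fc_free(void *ptr, size_t size)
{
	memblock_free_early(__pa(ptr), size);
}

static void __init example_populate_pte(unsigned long addr)
{
	/* arch would ensure pagetables covering @addr exist here */
}

void __init setup_per_cpu_areas(void)
{
	if (pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, example_fc_alloc,
				  example_fc_free, example_populate_pte) < 0)
		panic("Failed to initialize percpu areas.");
	/* the arch then derives __per_cpu_offset[] as in the generic code */
}
#endif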
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3068) #endif /* BUILD_PAGE_FIRST_CHUNK */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3069)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3070) #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3071) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3072) * Generic SMP percpu area setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3073) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3074) * The embedding helper is used because its behavior closely resembles
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3075) * the original non-dynamic generic percpu area setup. This is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3076) * important because many archs have addressing restrictions and might
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3077) * fail if the percpu area is located far away from the previous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3078) * location. As an added bonus, in non-NUMA cases, embedding is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3079) * generally a good idea TLB-wise because the percpu area can piggyback
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3080) * on the physical linear memory mapping, which uses large page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3081) * mappings on applicable archs.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3082) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3083) unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3084) EXPORT_SYMBOL(__per_cpu_offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3085)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3086) static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3087) size_t align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3088) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3089) return memblock_alloc_from(size, align, __pa(MAX_DMA_ADDRESS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3090) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3091)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3092) static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3093) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3094) memblock_free_early(__pa(ptr), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3097) void __init setup_per_cpu_areas(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3098) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3099) unsigned long delta;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3100) unsigned int cpu;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3101) int rc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3102)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3103) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3104) * Always reserve area for module percpu variables. That's
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3105) * what the legacy allocator did.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3106) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3107) rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3108) PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3109) pcpu_dfl_fc_alloc, pcpu_dfl_fc_free);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3110) if (rc < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3111) panic("Failed to initialize percpu areas.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3112)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3113) delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3114) for_each_possible_cpu(cpu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3115) __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3116) }
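
/*
 * Illustrative note (example only): with __per_cpu_offset[] filled in
 * above, per-cpu accessors reduce to simple pointer arithmetic,
 * conceptually:
 *
 *	ptr_on_cpu = (void *)((unsigned long)percpu_ptr +
 *			      __per_cpu_offset[cpu]);
 *
 * which is essentially what SHIFT_PERCPU_PTR() does with
 * per_cpu_offset() in include/linux/percpu-defs.h.
 */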
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3117) #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3118)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3119) #else /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3121) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3122) * UP percpu area setup.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3123) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3124) * UP always uses the km-based percpu allocator with identity mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3125) * Static percpu variables are indistinguishable from the usual static
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3126) * variables and don't require any special preparation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3127) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3128) void __init setup_per_cpu_areas(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3129) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3130) const size_t unit_size =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3131) roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3132) PERCPU_DYNAMIC_RESERVE));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3133) struct pcpu_alloc_info *ai;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3134) void *fc;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3135)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3136) ai = pcpu_alloc_alloc_info(1, 1);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3137) fc = memblock_alloc_from(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3138) if (!ai || !fc)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3139) panic("Failed to allocate memory for percpu areas.");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3140) /* kmemleak tracks the percpu allocations separately */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3141) kmemleak_free(fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3142)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3143) ai->dyn_size = unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3144) ai->unit_size = unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3145) ai->atom_size = unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3146) ai->alloc_size = unit_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3147) ai->groups[0].nr_units = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3148) ai->groups[0].cpu_map[0] = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3149)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3150) pcpu_setup_first_chunk(ai, fc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3151) pcpu_free_alloc_info(ai);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3152) }
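
/*
 * Example of the identity mapping (illustration only): with !SMP,
 * percpu-defs.h makes per_cpu_ptr() return its argument unchanged,
 * roughly:
 *
 *	#define per_cpu_ptr(ptr, cpu)	({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); })
 *
 * so the single unit allocated above is addressed directly, with no
 * offset translation.
 */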
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3153)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3154) #endif /* CONFIG_SMP */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3156) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3157) * pcpu_nr_pages - calculate total number of populated backing pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3158) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3159) * This reflects the number of pages populated to back chunks. Metadata is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3160) * excluded from the number exposed in meminfo as the number of backing pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3161) * scales with the number of cpus and can quickly outweigh the memory used for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3162) * metadata. It also keeps this calculation nice and simple.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3163) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3164) * RETURNS:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3165) * Total number of populated backing pages in use by the allocator.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3166) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3167) unsigned long pcpu_nr_pages(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3168) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3169) return pcpu_nr_populated * pcpu_nr_units;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3170) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3171) EXPORT_SYMBOL_GPL(pcpu_nr_pages);
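
/*
 * Example consumer (illustration): the "Percpu:" line in /proc/meminfo
 * reports this count converted to kB, along the lines of
 *
 *	show_val_kb(m, "Percpu:         ", pcpu_nr_pages());
 *
 * in fs/proc/meminfo.c.
 */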
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3172)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3173) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3174) * Percpu allocator is initialized early during boot when neither slab nor
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3175) * workqueue is available. Plug async management until everything is up
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3176) * and running.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3177) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3178) static int __init percpu_enable_async(void)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3179) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3180) pcpu_async_enabled = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3181) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3182) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3183) subsys_initcall(percpu_enable_async);