// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Contiguous Memory Allocator
 *
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Copyright IBM Corporation, 2013
 * Copyright LG Electronics Inc., 2014
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *	Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *	Joonsoo Kim <iamjoonsoo.kim@lge.com>
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
#  define DEBUG
#endif
#endif
#define CREATE_TRACE_POINTS

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/cma.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/kmemleak.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <trace/events/cma.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

#include "cma.h"

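/*
 * Forward declarations for the LRU pagevec draining helpers used below
 * (implemented in mm/swap.c).
 */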
extern void lru_cache_disable(void);
extern void lru_cache_enable(void);

struct cma cma_areas[MAX_CMA_AREAS];
unsigned cma_area_count;

phys_addr_t cma_get_base(const struct cma *cma)
{
	return PFN_PHYS(cma->base_pfn);
}

unsigned long cma_get_size(const struct cma *cma)
{
	return cma->count << PAGE_SHIFT;
}

const char *cma_get_name(const struct cma *cma)
{
	return cma->name;
}
EXPORT_SYMBOL_GPL(cma_get_name);

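/*
 * Convert an allocation alignment, given as a page order, into a mask of
 * bitmap bits.  Alignments at or below the per-bit order need no extra
 * masking.
 */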
static unsigned long cma_bitmap_aligned_mask(const struct cma *cma,
					     unsigned int align_order)
{
	if (align_order <= cma->order_per_bit)
		return 0;
	return (1UL << (align_order - cma->order_per_bit)) - 1;
}

/*
 * Find the offset of the base PFN from the specified align_order.
 * The value returned is in units of order_per_bit.
 */
static unsigned long cma_bitmap_aligned_offset(const struct cma *cma,
					       unsigned int align_order)
{
	return (cma->base_pfn & ((1UL << align_order) - 1))
		>> cma->order_per_bit;
}

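/*
 * Number of bitmap bits needed to cover @pages, rounded up to a whole
 * multiple of the per-bit order.
 */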
static unsigned long cma_bitmap_pages_to_bits(const struct cma *cma,
					      unsigned long pages)
{
	return ALIGN(pages, 1UL << cma->order_per_bit) >> cma->order_per_bit;
}

static void cma_clear_bitmap(struct cma *cma, unsigned long pfn,
			     unsigned int count)
{
	unsigned long bitmap_no, bitmap_count;

	bitmap_no = (pfn - cma->base_pfn) >> cma->order_per_bit;
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	mutex_lock(&cma->lock);
	bitmap_clear(cma->bitmap, bitmap_no, bitmap_count);
	mutex_unlock(&cma->lock);
}

static void __init cma_activate_area(struct cma *cma)
{
	unsigned long base_pfn = cma->base_pfn, pfn;
	struct zone *zone;

	cma->bitmap = bitmap_zalloc(cma_bitmap_maxno(cma), GFP_KERNEL);
	if (!cma->bitmap)
		goto out_error;

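	/*
	 * With CONFIG_CMA_INACTIVE the reserved range is never handed to the
	 * buddy allocator as MIGRATE_CMA pageblocks; only the bitmap
	 * bookkeeping below is set up, and cma_alloc() later hands out pages
	 * straight from the reservation.
	 */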
	if (IS_ENABLED(CONFIG_CMA_INACTIVE))
		goto out;
	/*
	 * alloc_contig_range() requires the pfn range specified to be in the
	 * same zone. Simplify by forcing the entire CMA resv range to be in the
	 * same zone.
	 */
	WARN_ON_ONCE(!pfn_valid(base_pfn));
	zone = page_zone(pfn_to_page(base_pfn));
	for (pfn = base_pfn + 1; pfn < base_pfn + cma->count; pfn++) {
		WARN_ON_ONCE(!pfn_valid(pfn));
		if (page_zone(pfn_to_page(pfn)) != zone)
			goto not_in_zone;
	}

	for (pfn = base_pfn; pfn < base_pfn + cma->count;
	     pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

out:
	mutex_init(&cma->lock);

#ifdef CONFIG_CMA_DEBUGFS
	INIT_HLIST_HEAD(&cma->mem_head);
	spin_lock_init(&cma->mem_head_lock);
#endif

	return;

not_in_zone:
	bitmap_free(cma->bitmap);
out_error:
	/* Expose all pages to the buddy, they are useless for CMA. */
	for (pfn = base_pfn; pfn < base_pfn + cma->count; pfn++)
		free_reserved_page(pfn_to_page(pfn));
	totalcma_pages -= cma->count;
	cma->count = 0;
	pr_err("CMA area %s could not be activated\n", cma->name);
	return;
}

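/*
 * Activate every area registered so far.  This runs at core_initcall time,
 * once the page allocator and slab are up (see the comment in
 * cma_init_reserved_mem() about deferred initialisation).
 */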
static int __init cma_init_reserved_areas(void)
{
	int i;

	for (i = 0; i < cma_area_count; i++)
		cma_activate_area(&cma_areas[i]);

	return 0;
}
core_initcall(cma_init_reserved_areas);

/**
 * cma_init_reserved_mem() - create custom contiguous area from reserved memory
 * @base: Base address of the reserved area
 * @size: Size of the reserved area (in bytes).
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @name: The name of the area. If this parameter is NULL, the name of
 *        the area will be set to "cmaN", where N is a running counter of
 *        used areas.
 * @res_cma: Pointer to store the created cma region.
 *
 * This function creates a custom contiguous area from already reserved memory.
 */
int __init cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
				 unsigned int order_per_bit,
				 const char *name,
				 struct cma **res_cma)
{
	struct cma *cma;
#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	phys_addr_t alignment;
#endif

	/* Sanity checks */
	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size || !memblock_is_region_reserved(base, size))
		return -EINVAL;

#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	/* ensure minimal alignment required by mm core */
	alignment = PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order);

	/* alignment should be aligned with order_per_bit */
	if (!IS_ALIGNED(alignment >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	if (ALIGN(base, alignment) != base || ALIGN(size, alignment) != size)
		return -EINVAL;
#endif

	/*
	 * Each reserved area must be initialised later, when more kernel
	 * subsystems (like slab allocator) are available.
	 */
	cma = &cma_areas[cma_area_count];

	if (name)
		snprintf(cma->name, CMA_MAX_NAME, name);
	else
		snprintf(cma->name, CMA_MAX_NAME, "cma%d\n", cma_area_count);

	cma->base_pfn = PFN_DOWN(base);
	cma->count = size >> PAGE_SHIFT;
	cma->order_per_bit = order_per_bit;
	*res_cma = cma;
	cma_area_count++;
	totalcma_pages += (size / PAGE_SIZE);

	return 0;
}
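
/*
 * Illustrative only: a reserved-memory setup path would typically hand an
 * already memblock-reserved range to CMA roughly as below.  The rmem
 * variable and error handling are hypothetical and not part of this file.
 *
 *	struct cma *cma;
 *	int err;
 *
 *	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name,
 *				    &cma);
 *	if (err)
 *		pr_err("rmem: CMA registration failed: %d\n", err);
 */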

/**
 * cma_declare_contiguous_nid() - reserve custom contiguous area
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @size: Size of the reserved area (in bytes).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @alignment: Alignment for the CMA area, should be power of 2 or zero
 * @order_per_bit: Order of pages represented by one bit on bitmap.
 * @fixed: hint about where to place the reserved area
 * @name: The name of the area. See function cma_init_reserved_mem()
 * @res_cma: Pointer to store the created cma region.
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows the creation of custom reserved areas.
 *
 * If @fixed is true, reserve a contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init cma_declare_contiguous_nid(phys_addr_t base,
			phys_addr_t size, phys_addr_t limit,
			phys_addr_t alignment, unsigned int order_per_bit,
			bool fixed, const char *name, struct cma **res_cma,
			int nid)
{
	phys_addr_t memblock_end = memblock_end_of_DRAM();
	phys_addr_t highmem_start;
	int ret = 0;

	/*
	 * We can't use __pa(high_memory) directly, since high_memory
	 * isn't a valid direct map VA, and DEBUG_VIRTUAL will (validly)
	 * complain. Find the boundary by adding one to the last valid
	 * address.
	 */
	highmem_start = __pa(high_memory - 1) + 1;
	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
		__func__, &size, &base, &limit, &alignment);

	if (cma_area_count == ARRAY_SIZE(cma_areas)) {
		pr_err("Not enough slots for CMA reserved regions!\n");
		return -ENOSPC;
	}

	if (!size)
		return -EINVAL;

	if (alignment && !is_power_of_2(alignment))
		return -EINVAL;

#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	/*
	 * Sanitise input arguments.
	 * Pages at both ends of the CMA area could be merged into adjacent
	 * unmovable-migratetype pages by the page allocator's buddy algorithm.
	 * In that case you would not get contiguous memory back, which is not
	 * what we want.
	 */
	alignment = max(alignment, (phys_addr_t)PAGE_SIZE <<
			max_t(unsigned long, MAX_ORDER - 1, pageblock_order));
	if (fixed && base & (alignment - 1)) {
		ret = -EINVAL;
		pr_err("Region at %pa must be aligned to %pa bytes\n",
			&base, &alignment);
		goto err;
	}
#endif
	base = ALIGN(base, alignment);
	size = ALIGN(size, alignment);
	limit &= ~(alignment - 1);

	if (!base)
		fixed = false;

	/* size should be aligned with order_per_bit */
	if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
		return -EINVAL;

	/*
	 * If allocating at a fixed base the request region must not cross the
	 * low/high memory boundary.
	 */
	if (fixed && base < highmem_start && base + size > highmem_start) {
		ret = -EINVAL;
		pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
			&base, &highmem_start);
		goto err;
	}

	/*
	 * If the limit is unspecified or above the memblock end, its effective
	 * value will be the memblock end. Set it explicitly to simplify further
	 * checks.
	 */
	if (limit == 0 || limit > memblock_end)
		limit = memblock_end;

	if (base + size > limit) {
		ret = -EINVAL;
		pr_err("Size (%pa) of region at %pa exceeds limit (%pa)\n",
			&size, &base, &limit);
		goto err;
	}

	/* Reserve memory */
	if (fixed) {
		if (memblock_is_region_reserved(base, size) ||
		    memblock_reserve(base, size) < 0) {
			ret = -EBUSY;
			goto err;
		}
	} else {
		phys_addr_t addr = 0;

		/*
		 * All pages in the reserved area must come from the same zone.
		 * If the requested region crosses the low/high memory boundary,
		 * try allocating from high memory first and fall back to low
		 * memory in case of failure.
		 */
		if (base < highmem_start && limit > highmem_start) {
			addr = memblock_alloc_range_nid(size, alignment,
					highmem_start, limit, nid, true);
			limit = highmem_start;
		}

		/*
		 * If there is enough memory, try a bottom-up allocation first.
		 * It will place the new cma area close to the start of the node
		 * and guarantee that the compaction is moving pages out of the
		 * cma area and not into it.
		 * Avoid using first 4GB to not interfere with constrained zones
		 * like DMA/DMA32.
		 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
		if (!memblock_bottom_up() && memblock_end >= SZ_4G + size) {
			memblock_set_bottom_up(true);
			addr = memblock_alloc_range_nid(size, alignment, SZ_4G,
							limit, nid, true);
			memblock_set_bottom_up(false);
		}
#endif

		if (!addr) {
			addr = memblock_alloc_range_nid(size, alignment, base,
					limit, nid, true);
			if (!addr) {
				ret = -ENOMEM;
				goto err;
			}
		}

		/*
		 * kmemleak scans/reads tracked objects for pointers to other
		 * objects but this address isn't mapped and accessible
		 */
		kmemleak_ignore_phys(addr);
		base = addr;
	}

	ret = cma_init_reserved_mem(base, size, order_per_bit, name, res_cma);
	if (ret)
		goto free_mem;

#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
		&base);
#else
	pr_info("Reserved %ld KiB at %pa\n", (unsigned long)size / SZ_1K,
		&base);
#endif
	return 0;

free_mem:
	memblock_free(base, size);
err:
#if !IS_ENABLED(CONFIG_CMA_INACTIVE)
	pr_err("Failed to reserve %ld MiB\n", (unsigned long)size / SZ_1M);
#else
	pr_err("Failed to reserve %ld KiB\n", (unsigned long)size / SZ_1K);
#endif
	return ret;
}

#ifdef CONFIG_CMA_DEBUG
static void cma_debug_show_areas(struct cma *cma)
{
	unsigned long next_zero_bit, next_set_bit, nr_zero;
	unsigned long start = 0;
	unsigned long nr_part, nr_total = 0;
	unsigned long nbits = cma_bitmap_maxno(cma);

	mutex_lock(&cma->lock);
	pr_info("number of available pages: ");
	for (;;) {
		next_zero_bit = find_next_zero_bit(cma->bitmap, nbits, start);
		if (next_zero_bit >= nbits)
			break;
		next_set_bit = find_next_bit(cma->bitmap, nbits, next_zero_bit);
		nr_zero = next_set_bit - next_zero_bit;
		nr_part = nr_zero << cma->order_per_bit;
		pr_cont("%s%lu@%lu", nr_total ? "+" : "", nr_part,
			next_zero_bit);
		nr_total += nr_part;
		start = next_zero_bit + nr_zero;
	}
	pr_cont("=> %lu free of %lu total pages\n", nr_total, cma->count);
	mutex_unlock(&cma->lock);
}
#else
static inline void cma_debug_show_areas(struct cma *cma) { }
#endif

/**
 * cma_alloc() - allocate pages from contiguous area
 * @cma: Contiguous memory region for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @gfp_mask: GFP mask to use during the cma allocation.
 *
 * This function allocates part of contiguous memory from a specific
 * contiguous memory area.
 */
struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
		       gfp_t gfp_mask)
{
	unsigned long mask, offset;
	unsigned long pfn = -1;
	unsigned long start = 0;
	unsigned long bitmap_maxno, bitmap_no, bitmap_count;
	size_t i;
	struct page *page = NULL;
	int ret = -ENOMEM;
	int num_attempts = 0;
	int max_retries = 5;
	s64 ts;
	struct cma_alloc_info cma_info = {0};

	trace_android_vh_cma_alloc_start(&ts);

	if (!cma || !cma->count || !cma->bitmap)
		goto out;

	pr_debug("%s(cma %p, count %zu, align %d gfp_mask 0x%x)\n", __func__,
		 (void *)cma, count, align, gfp_mask);

	if (!count)
		goto out;

	trace_cma_alloc_start(cma->name, count, align);

	mask = cma_bitmap_aligned_mask(cma, align);
	offset = cma_bitmap_aligned_offset(cma, align);
	bitmap_maxno = cma_bitmap_maxno(cma);
	bitmap_count = cma_bitmap_pages_to_bits(cma, count);

	if (bitmap_count > bitmap_maxno)
		goto out;

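	/*
	 * Drain and disable the per-CPU LRU caches so that target pages are
	 * not transiently pinned there, which would make the migration done
	 * by alloc_contig_range() fail with -EBUSY.
	 */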
	lru_cache_disable();
	for (;;) {
		struct acr_info info = {0};

		mutex_lock(&cma->lock);
		bitmap_no = bitmap_find_next_zero_area_off(cma->bitmap,
				bitmap_maxno, start, bitmap_count, mask,
				offset);
		if (bitmap_no >= bitmap_maxno) {
			if ((num_attempts < max_retries) && (ret == -EBUSY)) {
				mutex_unlock(&cma->lock);

				if (fatal_signal_pending(current) ||
				    (gfp_mask & __GFP_NORETRY))
					break;

				/*
				 * Page may be momentarily pinned by some other
				 * process which has been scheduled out, e.g.
				 * in exit path, during unmap call, or process
				 * fork and so cannot be freed there. Sleep
				 * for 100ms and retry the allocation.
				 */
				start = 0;
				ret = -ENOMEM;
				schedule_timeout_killable(msecs_to_jiffies(100));
				num_attempts++;
				continue;
			} else {
				mutex_unlock(&cma->lock);
				break;
			}
		}
		bitmap_set(cma->bitmap, bitmap_no, bitmap_count);
		/*
		 * It's safe to drop the lock here. We've marked this region for
		 * our exclusive use. If the migration fails we will take the
		 * lock again and unmark it.
		 */
		mutex_unlock(&cma->lock);

		pfn = cma->base_pfn + (bitmap_no << cma->order_per_bit);
		if (IS_ENABLED(CONFIG_CMA_INACTIVE)) {
			page = pfn_to_page(pfn);
			lru_cache_enable();
			goto out;
		}
		ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask, &info);
		cma_info.nr_migrated += info.nr_migrated;
		cma_info.nr_reclaimed += info.nr_reclaimed;
		cma_info.nr_mapped += info.nr_mapped;
		if (info.err) {
			if (info.err & ACR_ERR_ISOLATE)
				cma_info.nr_isolate_fail++;
			if (info.err & ACR_ERR_MIGRATE)
				cma_info.nr_migrate_fail++;
			if (info.err & ACR_ERR_TEST)
				cma_info.nr_test_fail++;
		}
		if (ret == 0) {
			page = pfn_to_page(pfn);
			break;
		}

		cma_clear_bitmap(cma, pfn, count);
		if (ret != -EBUSY)
			break;

		pr_debug("%s(): memory range at %p is busy, retrying\n",
			 __func__, pfn_to_page(pfn));

		trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn),
					   count, align);

		if (info.failed_pfn && gfp_mask & __GFP_NORETRY) {
			/* try again from the page following the failed one */
			start = (pfn_max_align_up(info.failed_pfn + 1) -
				 cma->base_pfn) >> cma->order_per_bit;

		} else {
			/* try again with a bit different memory target */
			start = bitmap_no + mask + 1;
		}
	}

	lru_cache_enable();
	trace_cma_alloc_finish(cma->name, pfn, page, count, align);
	trace_cma_alloc_info(cma->name, page, count, align, &cma_info);

	/*
	 * CMA can allocate multiple page blocks, which results in different
	 * blocks being marked with different tags. Reset the tags to ignore
	 * those page blocks.
	 */
	if (page) {
		for (i = 0; i < count; i++)
			page_kasan_tag_reset(page + i);
	}

	if (ret && !(gfp_mask & __GFP_NOWARN)) {
		pr_err("%s: %s: alloc failed, req-size: %zu pages, ret: %d\n",
		       __func__, cma->name, count, ret);
		cma_debug_show_areas(cma);
	}

	pr_debug("%s(): returned %p\n", __func__, page);
out:
	trace_android_vh_cma_alloc_finish(cma, page, count, align, gfp_mask, ts);
	if (page) {
		count_vm_event(CMA_ALLOC_SUCCESS);
		cma_sysfs_account_success_pages(cma, count);
	} else {
		count_vm_event(CMA_ALLOC_FAIL);
		if (cma)
			cma_sysfs_account_fail_pages(cma, count);
	}

	return page;
}
EXPORT_SYMBOL_GPL(cma_alloc);

/**
 * cma_release() - release allocated pages
 * @cma: Contiguous memory region for which the allocation is performed.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by cma_alloc().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool cma_release(struct cma *cma, const struct page *pages, unsigned int count)
{
	unsigned long pfn;

	if (!cma || !pages)
		return false;

	pr_debug("%s(page %p, count %u)\n", __func__, (void *)pages, count);

	pfn = page_to_pfn(pages);

	if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
		return false;

	VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
	if (!IS_ENABLED(CONFIG_CMA_INACTIVE))
		free_contig_range(pfn, count);
	cma_clear_bitmap(cma, pfn, count);
	trace_cma_release(cma->name, pfn, pages, count);

	return true;
}
EXPORT_SYMBOL_GPL(cma_release);
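
/*
 * Illustrative only: the two exported entry points above are typically
 * paired by a driver roughly as below.  dev_cma and nr_pages are
 * hypothetical and not defined in this file.
 *
 *	struct page *page;
 *
 *	page = cma_alloc(dev_cma, nr_pages, get_order(SZ_1M), GFP_KERNEL);
 *	if (page) {
 *		... use the nr_pages contiguous pages starting at page ...
 *		cma_release(dev_cma, page, nr_pages);
 *	}
 */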

#ifdef CONFIG_NO_GKI
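/* Return the total number of pages currently allocated from all CMA areas. */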
unsigned long cma_used_pages(void)
{
	struct cma *cma;
	unsigned long used;
	unsigned long val = 0;
	int i;

	for (i = 0; i < cma_area_count; i++) {
		cma = &cma_areas[i];
		mutex_lock(&cma->lock);
		used = bitmap_weight(cma->bitmap, (int)cma_bitmap_maxno(cma));
		mutex_unlock(&cma->lock);
		val += used << cma->order_per_bit;
	}
	return val;
}
EXPORT_SYMBOL_GPL(cma_used_pages);
#endif

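/*
 * Invoke @it for every registered CMA area; stop early and return its
 * nonzero result, if any.
 */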
int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data)
{
	int i;

	for (i = 0; i < cma_area_count; i++) {
		int ret = it(&cma_areas[i], data);

		if (ret)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(cma_for_each_area);