// SPDX-License-Identifier: GPL-2.0+
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * Contiguous Memory Allocator
 *
 * The Contiguous Memory Allocator (CMA) makes it possible to
 * allocate big physically contiguous chunks of memory after the
 * system has booted.
 *
 * Why is it needed?
 *
 * Various devices on embedded systems have no scatter-gather and/or
 * IO map support and require contiguous blocks of memory to
 * operate. They include devices such as cameras, hardware video
 * coders, etc.
 *
 * Such devices often require big memory buffers (a full HD frame
 * is, for instance, more than 2 megapixels large, i.e. more than 6
 * MB of memory), which makes mechanisms such as kmalloc() or
 * alloc_page() ineffective.
 *
 * At the same time, a solution where a big memory region is
 * reserved for a device is suboptimal, since often more memory is
 * reserved than strictly required and, moreover, the memory is
 * inaccessible to the page allocator even when the device drivers
 * don't use it.
 *
 * CMA tries to solve this issue by operating on memory regions
 * from which only movable pages can be allocated. This way, the
 * kernel can use the memory for the pagecache and, when a device
 * driver requests it, the allocated pages can be migrated away.
 */

#define pr_fmt(fmt) "cma: " fmt

#ifdef CONFIG_CMA_DEBUG
#ifndef DEBUG
# define DEBUG
#endif
#endif

#include <asm/page.h>

#include <linux/memblock.h>
#include <linux/err.h>
#include <linux/sizes.h>
#include <linux/dma-map-ops.h>
#include <linux/cma.h>
#include <trace/hooks/mm.h>

#ifdef CONFIG_CMA_SIZE_MBYTES
#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
#else
#define CMA_SIZE_MBYTES 0
#endif

struct cma *dma_contiguous_default_area;
EXPORT_SYMBOL_GPL(dma_contiguous_default_area);

/*
 * The default global CMA area size can be defined in the kernel's .config.
 * This is useful mainly for distro maintainers to create a kernel
 * that works correctly for most supported systems.
 * The size can be set in bytes or as a percentage of the total memory
 * in the system.
 *
 * Users who want to set the size of the global CMA area for their system
 * should use the cma= kernel parameter.
 */
static const phys_addr_t size_bytes __initconst =
			(phys_addr_t)CMA_SIZE_MBYTES * SZ_1M;
static phys_addr_t size_cmdline __initdata = -1;
static phys_addr_t base_cmdline __initdata;
static phys_addr_t limit_cmdline __initdata;

static int __init early_cma(char *p)
{
	if (!p) {
		pr_err("Config string not provided\n");
		return -EINVAL;
	}

	size_cmdline = memparse(p, &p);
	if (*p != '@')
		return 0;
	base_cmdline = memparse(p + 1, &p);
	if (*p != '-') {
		limit_cmdline = base_cmdline + size_cmdline;
		return 0;
	}
	limit_cmdline = memparse(p + 1, &p);

	return 0;
}
early_param("cma", early_cma);
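
/*
 * Illustrative examples of the "cma=" syntax parsed by early_cma() above;
 * the sizes and addresses below are made up:
 *
 *	cma=64M				64 MiB, placed anywhere
 *	cma=64M@0x10000000		64 MiB starting at base 0x10000000
 *	cma=64M@0x10000000-0x20000000	64 MiB placed within that range
 */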

#ifdef CONFIG_DMA_PERNUMA_CMA

static struct cma *dma_contiguous_pernuma_area[MAX_NUMNODES];
static phys_addr_t pernuma_size_bytes __initdata;

static int __init early_cma_pernuma(char *p)
{
	pernuma_size_bytes = memparse(p, &p);
	return 0;
}
early_param("cma_pernuma", early_cma_pernuma);
#endif
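
/*
 * For illustration only: booting with "cma_pernuma=16M" makes
 * dma_pernuma_cma_reserve() below reserve a 16 MiB CMA area on every
 * online NUMA node (the size here is just an example).
 */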

#ifdef CONFIG_CMA_SIZE_PERCENTAGE

static phys_addr_t __init __maybe_unused cma_early_percent_memory(void)
{
	unsigned long total_pages = PHYS_PFN(memblock_phys_mem_size());

	return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
}

#else

static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
{
	return 0;
}

#endif
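
/*
 * Worked example with assumed numbers: with CONFIG_CMA_SIZE_PERCENTAGE=10
 * and 4 GiB of memory, total_pages = 4 GiB / 4 KiB = 1048576 pages, so
 * cma_early_percent_memory() returns (1048576 * 10 / 100) << PAGE_SHIFT,
 * i.e. roughly 410 MiB.
 */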

#ifdef CONFIG_DMA_PERNUMA_CMA
void __init dma_pernuma_cma_reserve(void)
{
	int nid;

	if (!pernuma_size_bytes)
		return;

	for_each_online_node(nid) {
		int ret;
		char name[CMA_MAX_NAME];
		struct cma **cma = &dma_contiguous_pernuma_area[nid];

		snprintf(name, sizeof(name), "pernuma%d", nid);
		ret = cma_declare_contiguous_nid(0, pernuma_size_bytes, 0, 0,
						 0, false, name, cma, nid);
		if (ret) {
			pr_warn("%s: reservation failed: err %d, node %d\n",
				__func__, ret, nid);
			continue;
		}

		pr_debug("%s: reserved %llu MiB on node %d\n", __func__,
			 (unsigned long long)pernuma_size_bytes / SZ_1M, nid);
	}
}
#endif

/**
 * dma_contiguous_reserve() - reserve area(s) for contiguous memory handling
 * @limit: End address of the reserved memory (optional, 0 for any).
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init dma_contiguous_reserve(phys_addr_t limit)
{
	phys_addr_t selected_size = 0;
	phys_addr_t selected_base = 0;
	phys_addr_t selected_limit = limit;
	bool fixed = false;

	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);

	if (size_cmdline != -1) {
		selected_size = size_cmdline;
		selected_base = base_cmdline;
		selected_limit = min_not_zero(limit_cmdline, limit);
		if (base_cmdline + size_cmdline == limit_cmdline)
			fixed = true;
	} else {
#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
		selected_size = size_bytes;
#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
		selected_size = cma_early_percent_memory();
#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
		selected_size = min(size_bytes, cma_early_percent_memory());
#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
		selected_size = max(size_bytes, cma_early_percent_memory());
#endif
	}

	if (selected_size && !dma_contiguous_default_area) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);

		dma_contiguous_reserve_area(selected_size, selected_base,
					    selected_limit,
					    &dma_contiguous_default_area,
					    fixed);
	}
}
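
/*
 * Hedged usage sketch, not taken from a specific architecture: arch setup
 * code typically calls this once after memblock is initialized, e.g.
 * (the limit variable below is hypothetical):
 *
 *	dma_contiguous_reserve(dma_zone_limit);
 */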

void __weak
dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
}

/**
 * dma_contiguous_reserve_area() - reserve custom contiguous area
 * @size: Size of the reserved area (in bytes).
 * @base: Base address of the reserved area (optional, use 0 for any).
 * @limit: End address of the reserved memory (optional, 0 for any).
 * @res_cma: Pointer to store the created cma region.
 * @fixed: hint about where to place the reserved area
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the early allocator (memblock or bootmem)
 * has been activated and all other subsystems have already allocated/reserved
 * memory. This function allows creation of custom reserved areas for specific
 * devices.
 *
 * If @fixed is true, reserve contiguous area at exactly @base. If false,
 * reserve in range from @base to @limit.
 */
int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
				       phys_addr_t limit, struct cma **res_cma,
				       bool fixed)
{
	int ret;

	ret = cma_declare_contiguous(base, size, limit, 0, 0, fixed,
				     "reserved", res_cma);
	if (ret)
		return ret;

	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(cma_get_base(*res_cma),
				   cma_get_size(*res_cma));

	return 0;
}
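
/*
 * Hedged usage sketch (names and sizes are made up): arch or platform code
 * could carve out a 16 MiB area anywhere below 4 GiB for later assignment
 * to a device:
 *
 *	static struct cma *example_cma;
 *
 *	dma_contiguous_reserve_area(SZ_16M, 0, SZ_4G, &example_cma, false);
 */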

/**
 * dma_alloc_from_contiguous() - allocate pages from contiguous area
 * @dev: Pointer to device for which the allocation is performed.
 * @count: Requested number of pages.
 * @align: Requested alignment of pages (in PAGE_SIZE order).
 * @no_warn: Avoid printing message about failed allocation.
 *
 * This function allocates a memory buffer for the specified device. It uses
 * the device-specific contiguous memory area if available, or the default
 * global one. It requires the architecture-specific dev_get_cma_area()
 * helper function.
 */
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       unsigned int align, bool no_warn)
{
	if (align > CONFIG_CMA_ALIGNMENT)
		align = CONFIG_CMA_ALIGNMENT;

	return cma_alloc(dev_get_cma_area(dev), count, align, GFP_KERNEL |
			 (no_warn ? __GFP_NOWARN : 0));
}
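
/*
 * Hedged usage sketch (not from a real driver): allocating and releasing a
 * 16-page (64 KiB) buffer with order-4 alignment:
 *
 *	struct page *page;
 *
 *	page = dma_alloc_from_contiguous(dev, 16, 4, false);
 *	if (page)
 *		dma_release_from_contiguous(dev, page, 16);
 */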

/**
 * dma_release_from_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @pages: Allocated pages.
 * @count: Number of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_from_contiguous().
 * It returns false when the provided pages do not belong to the contiguous
 * area and true otherwise.
 */
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 int count)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}

static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)
{
	unsigned int align = min(get_order(size), CONFIG_CMA_ALIGNMENT);

	return cma_alloc(cma, size >> PAGE_SHIFT, align,
			 GFP_KERNEL | (gfp & __GFP_NOWARN));
}

/**
 * dma_alloc_contiguous() - allocate contiguous pages
 * @dev: Pointer to device for which the allocation is performed.
 * @size: Requested allocation size.
 * @gfp: Allocation flags.
 *
 * Tries to use the device-specific contiguous memory area if available, then
 * the per-NUMA CMA area; if that allocation fails, it falls back to the
 * default global area.
 *
 * Note that single-page allocations normally bypass the per-NUMA and global
 * areas (unless the android_vh_subpage_dma_contig_alloc vendor hook opts in),
 * since the addresses within one page are always contiguous, so there is no
 * need to consume CMA pages for them; this also helps reduce fragmentation.
 */
struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
#ifdef CONFIG_DMA_PERNUMA_CMA
	int nid = dev_to_node(dev);
#endif
	bool allow_subpage_alloc = false;

	/* CMA can be used only in the context which permits sleeping */
	if (!gfpflags_allow_blocking(gfp))
		return NULL;
	if (dev->cma_area)
		return cma_alloc_aligned(dev->cma_area, size, gfp);

	if (size <= PAGE_SIZE) {
		trace_android_vh_subpage_dma_contig_alloc(&allow_subpage_alloc, dev, &size);
		if (!allow_subpage_alloc)
			return NULL;
	}

#ifdef CONFIG_DMA_PERNUMA_CMA
	if (nid != NUMA_NO_NODE && !(gfp & (GFP_DMA | GFP_DMA32))) {
		struct cma *cma = dma_contiguous_pernuma_area[nid];
		struct page *page;

		if (cma) {
			page = cma_alloc_aligned(cma, size, gfp);
			if (page)
				return page;
		}
	}
#endif
	if (!dma_contiguous_default_area)
		return NULL;

	return cma_alloc_aligned(dma_contiguous_default_area, size, gfp);
}
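
/*
 * Hedged usage sketch: a driver allocating a 1 MiB physically contiguous
 * buffer and freeing it again (error handling trimmed):
 *
 *	struct page *page;
 *
 *	page = dma_alloc_contiguous(dev, SZ_1M, GFP_KERNEL);
 *	if (page)
 *		dma_free_contiguous(dev, page, SZ_1M);
 */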

/**
 * dma_free_contiguous() - release allocated pages
 * @dev: Pointer to device for which the pages were allocated.
 * @page: Pointer to the allocated pages.
 * @size: Size of allocated pages.
 *
 * This function releases memory allocated by dma_alloc_contiguous(). Since
 * cma_release() returns false when the provided pages do not belong to a
 * contiguous area (and true otherwise), this function falls back to
 * __free_pages() on a false return.
 */
void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/* if dev has its own cma, free page from there */
	if (dev->cma_area) {
		if (cma_release(dev->cma_area, page, count))
			return;
	} else {
		/*
		 * otherwise, page is from either per-numa cma or default cma
		 */
#ifdef CONFIG_DMA_PERNUMA_CMA
		if (cma_release(dma_contiguous_pernuma_area[page_to_nid(page)],
				page, count))
			return;
#endif
		if (cma_release(dma_contiguous_default_area, page, count))
			return;
	}

	/* not in any cma, free from buddy */
	__free_pages(page, get_order(size));
}

/*
 * Support for reserved memory regions defined in device tree
 */
#ifdef CONFIG_OF_RESERVED_MEM
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/of_reserved_mem.h>

#undef pr_fmt
#define pr_fmt(fmt) fmt

static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
{
	dev->cma_area = rmem->priv;
	return 0;
}

static void rmem_cma_device_release(struct reserved_mem *rmem,
				    struct device *dev)
{
	dev->cma_area = NULL;
}

static const struct reserved_mem_ops rmem_cma_ops = {
	.device_init	= rmem_cma_device_init,
	.device_release = rmem_cma_device_release,
};

static int __init rmem_cma_setup(struct reserved_mem *rmem)
{
	phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
	phys_addr_t mask = align - 1;
	unsigned long node = rmem->fdt_node;
	bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
	struct cma *cma;
	int err;

	if (size_cmdline != -1 && default_cma) {
		pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
			rmem->name);
		return -EBUSY;
	}

	if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
	    of_get_flat_dt_prop(node, "no-map", NULL))
		return -EINVAL;

	if ((rmem->base & mask) || (rmem->size & mask)) {
		pr_err("Reserved memory: incorrect alignment of CMA region\n");
		return -EINVAL;
	}

	err = cma_init_reserved_mem(rmem->base, rmem->size, 0, rmem->name, &cma);
	if (err) {
		pr_err("Reserved memory: unable to setup CMA region\n");
		return err;
	}
	/* Architecture specific contiguous memory fixup. */
	dma_contiguous_early_fixup(rmem->base, rmem->size);

	if (default_cma)
		dma_contiguous_default_area = cma;

	rmem->ops = &rmem_cma_ops;
	rmem->priv = cma;

	pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
		&rmem->base, (unsigned long)rmem->size / SZ_1M);

	return 0;
}
RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
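
/*
 * Illustrative device tree snippet (sizes and cell values are examples only)
 * that rmem_cma_setup() above would pick up and, because of the
 * linux,cma-default property, turn into the default CMA area:
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		linux,cma {
 *			compatible = "shared-dma-pool";
 *			reusable;
 *			size = <0x4000000>;
 *			linux,cma-default;
 *		};
 *	};
 */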
#endif