// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 ARM Ltd.
 * Copyright (C) 2020 Google LLC
 */
#include <linux/cma.h>
#include <linux/debugfs.h>
#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/init.h>
#include <linux/genalloc.h>
#include <linux/set_memory.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

static struct gen_pool *atomic_pool_dma __ro_after_init;
static unsigned long pool_size_dma;
static struct gen_pool *atomic_pool_dma32 __ro_after_init;
static unsigned long pool_size_dma32;
static struct gen_pool *atomic_pool_kernel __ro_after_init;
static unsigned long pool_size_kernel;

/* Size can be set with the coherent_pool= command-line parameter */
static size_t atomic_pool_size;

/* Dynamic background expansion when the atomic pool is near capacity */
static struct work_struct atomic_pool_work;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);
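
/*
 * Example: booting with "coherent_pool=256K" sizes each atomic pool at
 * 256 KiB; memparse() accepts the usual K/M/G suffixes.
 */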

static void __init dma_atomic_pool_debugfs_init(void)
{
	struct dentry *root;

	root = debugfs_create_dir("dma_pools", NULL);
	if (IS_ERR_OR_NULL(root))
		return;

	debugfs_create_ulong("pool_size_dma", 0400, root, &pool_size_dma);
	debugfs_create_ulong("pool_size_dma32", 0400, root, &pool_size_dma32);
	debugfs_create_ulong("pool_size_kernel", 0400, root, &pool_size_kernel);
}
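
/*
 * With debugfs mounted in the usual place, the counters registered above
 * appear as /sys/kernel/debug/dma_pools/pool_size_{dma,dma32,kernel},
 * mode 0400 (readable by root only).
 */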

static void dma_atomic_pool_size_add(gfp_t gfp, size_t size)
{
	if (gfp & __GFP_DMA)
		pool_size_dma += size;
	else if (gfp & __GFP_DMA32)
		pool_size_dma32 += size;
	else
		pool_size_kernel += size;
}

static bool cma_in_zone(gfp_t gfp)
{
	unsigned long size;
	phys_addr_t end;
	struct cma *cma;

	cma = dev_get_cma_area(NULL);
	if (!cma)
		return false;

	size = cma_get_size(cma);
	if (!size)
		return false;

	/* CMA can't cross zone boundaries, see cma_activate_area() */
	end = cma_get_base(cma) + size - 1;
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
		return end <= DMA_BIT_MASK(zone_dma_bits);
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32) && !zone_dma32_are_empty())
		return end <= DMA_BIT_MASK(32);
	return true;
}
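
/*
 * Worked example with hypothetical addresses: a CMA area spanning
 * 0xc0000000-0xffffffff has end == DMA_BIT_MASK(32), so it can back a
 * GFP_DMA32 pool, but not a GFP_DMA pool on a system where
 * zone_dma_bits is 30 (a 1 GiB limit).
 */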

static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
			      gfp_t gfp)
{
	unsigned int order;
	struct page *page = NULL;
	void *addr;
	int ret = -ENOMEM;

	/* Cannot allocate larger than MAX_ORDER-1 */
	order = min(get_order(pool_size), MAX_ORDER - 1);

	do {
		pool_size = 1 << (PAGE_SHIFT + order);
		if (cma_in_zone(gfp))
			page = dma_alloc_from_contiguous(NULL, 1 << order,
							 order, false);
		if (!page)
			page = alloc_pages(gfp, order);
	} while (!page && order-- > 0);
	if (!page)
		goto out;

	arch_dma_prep_coherent(page, pool_size);

#ifdef CONFIG_DMA_DIRECT_REMAP
	addr = dma_common_contiguous_remap(page, pool_size,
					   pgprot_dmacoherent(PAGE_KERNEL),
					   __builtin_return_address(0));
	if (!addr)
		goto free_page;
#else
	addr = page_to_virt(page);
#endif
	/*
	 * Memory in the atomic DMA pools must be unencrypted; the pools never
	 * shrink, so no re-encryption occurs in dma_direct_free().
	 */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto remove_mapping;
	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (ret)
		goto encrypt_mapping;

	dma_atomic_pool_size_add(gfp, pool_size);
	return 0;

encrypt_mapping:
	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (WARN_ON_ONCE(ret)) {
		/* Decrypt succeeded but encrypt failed, purposely leak */
		goto out;
	}
remove_mapping:
#ifdef CONFIG_DMA_DIRECT_REMAP
	dma_common_free_remap(addr, pool_size);
#endif
free_page: __maybe_unused
	__free_pages(page, order);
out:
	return ret;
}
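
/*
 * Fallback example: with 4 KiB pages, a 256 KiB expansion starts at
 * order 6 (64 pages). If neither CMA nor alloc_pages() can satisfy
 * that, the loop retries at order 5, 4, ..., 0, shrinking pool_size to
 * match, and only returns -ENOMEM once even a single page fails.
 */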

static void atomic_pool_resize(struct gen_pool *pool, gfp_t gfp)
{
	if (pool && gen_pool_avail(pool) < atomic_pool_size)
		atomic_pool_expand(pool, gen_pool_size(pool), gfp);
}

static void atomic_pool_work_fn(struct work_struct *work)
{
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		atomic_pool_resize(atomic_pool_dma,
				   GFP_KERNEL | GFP_DMA);
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && !zone_dma32_are_empty())
		atomic_pool_resize(atomic_pool_dma32,
				   GFP_KERNEL | GFP_DMA32);
	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
}

static __init struct gen_pool *__dma_atomic_pool_init(size_t pool_size,
						      gfp_t gfp)
{
	struct gen_pool *pool;
	int ret;

	pool = gen_pool_create(PAGE_SHIFT, NUMA_NO_NODE);
	if (!pool)
		return NULL;

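	/*
	 * Order-aligned first-fit: each allocation is aligned to the
	 * power-of-two order of its size, preserving the natural
	 * alignment DMA buffers typically require.
	 */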
	gen_pool_set_algo(pool, gen_pool_first_fit_order_align, NULL);

	ret = atomic_pool_expand(pool, pool_size, gfp);
	if (ret) {
		gen_pool_destroy(pool);
		pr_err("DMA: failed to allocate %zu KiB %pGg pool for atomic allocation\n",
		       pool_size >> 10, &gfp);
		return NULL;
	}

	pr_info("DMA: preallocated %zu KiB %pGg pool for atomic allocations\n",
		gen_pool_size(pool) >> 10, &gfp);
	return pool;
}

static int __init dma_atomic_pool_init(void)
{
	int ret = 0;

	/*
	 * If coherent_pool was not used on the command line, default the pool
	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
	 */
	if (!atomic_pool_size) {
		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);

		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
	}
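
	/*
	 * Worked example: with 4 KiB pages, a 4 GiB machine has
	 * totalram_pages() == 1048576; dividing by SZ_1G / SZ_128K == 8192
	 * gives 128 pages, i.e. a 512 KiB default per pool, clamped to at
	 * least SZ_128K and at most MAX_ORDER_NR_PAGES pages.
	 */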
	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);

	atomic_pool_kernel = __dma_atomic_pool_init(atomic_pool_size,
						    GFP_KERNEL);
	if (!atomic_pool_kernel)
		ret = -ENOMEM;
	if (has_managed_dma()) {
		atomic_pool_dma = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA);
		if (!atomic_pool_dma)
			ret = -ENOMEM;
	}
	if (IS_ENABLED(CONFIG_ZONE_DMA32) && !zone_dma32_are_empty()) {
		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
						GFP_KERNEL | GFP_DMA32);
		if (!atomic_pool_dma32)
			ret = -ENOMEM;
	}

	dma_atomic_pool_debugfs_init();
	return ret;
}
postcore_initcall(dma_atomic_pool_init);

static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
{
	if (prev == NULL) {
		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32) && !zone_dma32_are_empty())
			return atomic_pool_dma32;
		if (atomic_pool_dma && (gfp & GFP_DMA))
			return atomic_pool_dma;
		return atomic_pool_kernel;
	}
	if (prev == atomic_pool_kernel)
		return atomic_pool_dma32 ? atomic_pool_dma32 : atomic_pool_dma;
	if (prev == atomic_pool_dma32)
		return atomic_pool_dma;
	return NULL;
}
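
/*
 * Probe order: the first call returns the pool matching the zone the
 * gfp mask requests (dma32, then dma, then kernel); each later call
 * steps down toward more restrictive pools (kernel -> dma32 -> dma),
 * so a caller looping until NULL visits every pool whose memory could
 * still satisfy the request.
 */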

static struct page *__dma_alloc_from_pool(struct device *dev, size_t size,
		struct gen_pool *pool, void **cpu_addr,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	unsigned long addr;
	phys_addr_t phys;

	addr = gen_pool_alloc(pool, size);
	if (!addr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, addr);
	if (phys_addr_ok && !phys_addr_ok(dev, phys, size)) {
		gen_pool_free(pool, addr, size);
		return NULL;
	}

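	/*
	 * Running low: let the background worker expand the pool, since
	 * expansion may block and cannot happen in this atomic path.
	 */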
	if (gen_pool_avail(pool) < atomic_pool_size)
		schedule_work(&atomic_pool_work);

	*cpu_addr = (void *)addr;
	memset(*cpu_addr, 0, size);
	return pfn_to_page(__phys_to_pfn(phys));
}

struct page *dma_alloc_from_pool(struct device *dev, size_t size,
		void **cpu_addr, gfp_t gfp,
		bool (*phys_addr_ok)(struct device *, phys_addr_t, size_t))
{
	struct gen_pool *pool = NULL;
	struct page *page;

	while ((pool = dma_guess_pool(pool, gfp))) {
		page = __dma_alloc_from_pool(dev, size, pool, cpu_addr,
					     phys_addr_ok);
		if (page)
			return page;
	}

	WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev));
	return NULL;
}
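
/*
 * Hypothetical caller sketch (dma-direct takes roughly this path when
 * it must allocate without blocking):
 *
 *	void *vaddr;
 *	struct page *page;
 *
 *	page = dma_alloc_from_pool(dev, size, &vaddr, GFP_ATOMIC, NULL);
 *	if (page)
 *		handle_dma_buffer(page, vaddr);	// hypothetical consumer
 *
 * The returned buffer is already zeroed; pass a phys_addr_ok callback
 * (dma-direct uses dma_coherent_ok) to reject pools the device cannot
 * address.
 */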

bool dma_free_from_pool(struct device *dev, void *start, size_t size)
{
	struct gen_pool *pool = NULL;

	while ((pool = dma_guess_pool(pool, 0))) {
		if (!gen_pool_has_addr(pool, (unsigned long)start, size))
			continue;
		gen_pool_free(pool, (unsigned long)start, size);
		return true;
	}

	return false;
}