^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1) // SPDX-License-Identifier: GPL-2.0-only
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 3) * Basic general purpose allocator for managing special purpose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 4) * memory, for example, memory that is not managed by the regular
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 5) * kmalloc/kfree interface. Uses for this include on-device special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 6) * memory, uncached memory etc.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 7) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 8) * It is safe to use the allocator in NMI handlers and other special
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 9) * unblockable contexts that could otherwise deadlock on locks. This
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 10) * is implemented by using atomic operations and retries on any
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 11) * conflicts. The disadvantage is that there may be livelocks in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 12) * extreme cases. For better scalability, one allocator can be used
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 13) * for each CPU.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 14) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 15) * The lockless operation only works if there is enough memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 16) * available. If new memory is added to the pool, a lock still has to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 17) * be taken. So any user relying on locklessness has to ensure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 18) * that sufficient memory is preallocated.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 19) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 20) * The basic atomic operation of this allocator is cmpxchg on long.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 21) * On architectures that don't have NMI-safe cmpxchg implementation,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 22) * the allocator can NOT be used in NMI handlers. So code that uses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 23) * the allocator in an NMI handler should depend on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 24) * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 25) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 26) * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 27) */
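
/*
 * Illustrative sketch of typical usage (not taken from an in-tree driver;
 * the SRAM base, size and granularity below are made up).  gen_pool_add(),
 * gen_pool_alloc() and gen_pool_free() are the wrappers declared in
 * <linux/genalloc.h> around the *_owner() functions defined in this file.
 * A min_alloc_order of 3 gives 8-byte granularity; gen_pool_alloc()
 * returns 0 when the pool is exhausted:
 *
 *	struct gen_pool *pool;
 *	unsigned long vaddr;
 *
 *	pool = gen_pool_create(3, -1);
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add(pool, (unsigned long)sram_base, SZ_4K, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	vaddr = gen_pool_alloc(pool, 256);
 *	...
 *	gen_pool_free(pool, vaddr, 256);
 *	gen_pool_destroy(pool);
 */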
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 28)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 29) #include <linux/slab.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 30) #include <linux/export.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 31) #include <linux/bitmap.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 32) #include <linux/rculist.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 33) #include <linux/interrupt.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 34) #include <linux/genalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 35) #include <linux/of_device.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 36) #include <linux/vmalloc.h>
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 37)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 38) static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 39) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 40) return chunk->end_addr - chunk->start_addr + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 41) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 42)
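/*
 * Lockless helper: atomically set the bits in @mask_to_set at *addr using a
 * cmpxchg() retry loop.  Returns 0 on success, or -EBUSY if any bit in the
 * mask was already set (i.e. another user won the race for that word).
 */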
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 43) static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 44) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 45) unsigned long val, nval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 46)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 47) nval = *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 48) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 49) val = nval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 50) if (val & mask_to_set)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 51) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 52) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 53) } while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 54)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 55) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 56) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 57)
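/*
 * Lockless helper: atomically clear the bits in @mask_to_clear at *addr
 * using a cmpxchg() retry loop.  Returns 0 on success, or -EBUSY if any bit
 * in the mask was not set, which indicates a double free or a bad range.
 */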
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 58) static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 59) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 60) unsigned long val, nval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 61)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 62) nval = *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 63) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 64) val = nval;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 65) if ((val & mask_to_clear) != mask_to_clear)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 66) return -EBUSY;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 67) cpu_relax();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 68) } while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 69)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 70) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 71) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 72)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 73) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 74) * bitmap_set_ll - set the specified number of bits at the specified position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 75) * @map: pointer to a bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 76) * @start: a bit position in @map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 77) * @nr: number of bits to set
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 78) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 79) * Set @nr bits starting from @start in @map locklessly. Several users
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 80) * can set/clear the same bitmap simultaneously without a lock. If two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 81) * users set the same bit, one user will get back the number of bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 82) * that still remain to be set; otherwise 0 is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 83) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 84) static unsigned long bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 85) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 86) unsigned long *p = map + BIT_WORD(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 87) const unsigned long size = start + nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 88) int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 89) unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 90)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 91) while (nr >= bits_to_set) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 92) if (set_bits_ll(p, mask_to_set))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 93) return nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 94) nr -= bits_to_set;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 95) bits_to_set = BITS_PER_LONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 96) mask_to_set = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 97) p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 98) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 99) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 100) mask_to_set &= BITMAP_LAST_WORD_MASK(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 101) if (set_bits_ll(p, mask_to_set))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 102) return nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 103) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 104)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 105) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 108) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 109) * bitmap_clear_ll - clear the specified number of bits at the specified position
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 110) * @map: pointer to a bitmap
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 111) * @start: a bit position in @map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 112) * @nr: number of bits to clear
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 113) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 114) * Clear @nr bits starting from @start in @map locklessly. Several users
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 115) * can set/clear the same bitmap simultaneously without a lock. If two
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 116) * users clear the same bit, one user will get back the number of bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 117) * that still remain to be cleared; otherwise 0 is returned.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 118) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 119) static unsigned long
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 120) bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 121) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 122) unsigned long *p = map + BIT_WORD(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 123) const unsigned long size = start + nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 124) int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 125) unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 126)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 127) while (nr >= bits_to_clear) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 128) if (clear_bits_ll(p, mask_to_clear))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 129) return nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 130) nr -= bits_to_clear;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 131) bits_to_clear = BITS_PER_LONG;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 132) mask_to_clear = ~0UL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 133) p++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 134) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 135) if (nr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 136) mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 137) if (clear_bits_ll(p, mask_to_clear))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 138) return nr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 141) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 142) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 144) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 145) * gen_pool_create - create a new special memory pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 146) * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 147) * @nid: node id of the node the pool structure should be allocated on, or -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 148) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 149) * Create a new special memory pool that can be used to manage special purpose
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 150) * memory not managed by the regular kmalloc/kfree interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 151) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 152) struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 153) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 154) struct gen_pool *pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 156) pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 157) if (pool != NULL) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 158) spin_lock_init(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 159) INIT_LIST_HEAD(&pool->chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 160) pool->min_alloc_order = min_alloc_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 161) pool->algo = gen_pool_first_fit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 162) pool->data = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 163) pool->name = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 164) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 165) return pool;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 166) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 167) EXPORT_SYMBOL(gen_pool_create);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 168)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 169) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 170) * gen_pool_add_owner - add a new chunk of special memory to the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 171) * @pool: pool to add new memory chunk to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 172) * @virt: virtual starting address of memory chunk to add to pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 173) * @phys: physical starting address of memory chunk to add to pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 174) * @size: size in bytes of the memory chunk to add to pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 175) * @nid: node id of the node the chunk structure and bitmap should be
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 176) * allocated on, or -1
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 177) * @owner: private data the publisher would like to recall at alloc time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 178) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 179) * Add a new chunk of special memory to the specified pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 180) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 181) * Returns 0 on success or a negative errno on failure.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 183) int gen_pool_add_owner(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 184) size_t size, int nid, void *owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 185) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 186) struct gen_pool_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 187) unsigned long nbits = size >> pool->min_alloc_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 188) unsigned long nbytes = sizeof(struct gen_pool_chunk) +
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 189) BITS_TO_LONGS(nbits) * sizeof(long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 190)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 191) chunk = vzalloc_node(nbytes, nid);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 192) if (unlikely(chunk == NULL))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 193) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 194)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 195) chunk->phys_addr = phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 196) chunk->start_addr = virt;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 197) chunk->end_addr = virt + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 198) chunk->owner = owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 199) atomic_long_set(&chunk->avail, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 200)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 201) spin_lock(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 202) list_add_rcu(&chunk->next_chunk, &pool->chunks);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 203) spin_unlock(&pool->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 204)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 205) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 206) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 207) EXPORT_SYMBOL(gen_pool_add_owner);
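
/*
 * Example (sketch only; the device, resource and pool variables are assumed
 * to exist).  Most callers use the gen_pool_add() or gen_pool_add_virt()
 * wrappers from <linux/genalloc.h>, which call gen_pool_add_owner() with a
 * NULL @owner.  Passing the physical address of the region is what makes
 * gen_pool_virt_to_phys() below useful:
 *
 *	void __iomem *va = devm_ioremap(dev, res->start, resource_size(res));
 *
 *	if (!va)
 *		return -ENOMEM;
 *	ret = gen_pool_add_virt(pool, (unsigned long)va, res->start,
 *				resource_size(res), dev_to_node(dev));
 *	if (ret)
 *		return ret;
 */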
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 208)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 209) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 210) * gen_pool_virt_to_phys - return the physical address of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 211) * @pool: pool to allocate from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 212) * @addr: starting address of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 213) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 214) * Returns the physical address on success, or -1 on error.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 215) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 216) phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 217) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 218) struct gen_pool_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 219) phys_addr_t paddr = -1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 220)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 221) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 222) list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 223) if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 224) paddr = chunk->phys_addr + (addr - chunk->start_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 225) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 226) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 228) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 229)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 230) return paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 231) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 232) EXPORT_SYMBOL(gen_pool_virt_to_phys);
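
/*
 * For example (continuing the sketch above), a buffer handed to a device is
 * typically translated right after allocation; -1 is returned if @addr does
 * not fall inside any chunk of the pool:
 *
 *	unsigned long va = gen_pool_alloc(pool, 64);
 *	phys_addr_t pa;
 *
 *	if (!va)
 *		return -ENOMEM;
 *	pa = gen_pool_virt_to_phys(pool, va);
 */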
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 233)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 234) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 235) * gen_pool_destroy - destroy a special memory pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 236) * @pool: pool to destroy
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 237) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 238) * Destroy the specified special memory pool. Verifies that there are no
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 239) * outstanding allocations.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 240) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 241) void gen_pool_destroy(struct gen_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 242) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 243) struct list_head *_chunk, *_next_chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 244) struct gen_pool_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 245) int order = pool->min_alloc_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 246) unsigned long bit, end_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 247)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 248) list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 249) chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 250) list_del(&chunk->next_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 252) end_bit = chunk_size(chunk) >> order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 253) bit = find_next_bit(chunk->bits, end_bit, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 254) BUG_ON(bit < end_bit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 256) vfree(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 257) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 258) kfree_const(pool->name);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 259) kfree(pool);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 261) EXPORT_SYMBOL(gen_pool_destroy);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 262)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 263) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 264) * gen_pool_alloc_algo_owner - allocate special memory from the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 265) * @pool: pool to allocate from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 266) * @size: number of bytes to allocate from the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 267) * @algo: algorithm passed from caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 268) * @data: data passed to algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 269) * @owner: optionally retrieve the chunk owner
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 270) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 271) * Allocate the requested number of bytes from the specified pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 272) * Uses the pool allocation function (with first-fit algorithm by default).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 273) * Can not be used in NMI handler on architectures without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 274) * NMI-safe cmpxchg implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 275) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 276) unsigned long gen_pool_alloc_algo_owner(struct gen_pool *pool, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 277) genpool_algo_t algo, void *data, void **owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 278) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 279) struct gen_pool_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 280) unsigned long addr = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 281) int order = pool->min_alloc_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 282) unsigned long nbits, start_bit, end_bit, remain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 283)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 284) #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 285) BUG_ON(in_nmi());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 286) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 288) if (owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 289) *owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 290)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 291) if (size == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 292) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 294) nbits = (size + (1UL << order) - 1) >> order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 295) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 296) list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 297) if (size > atomic_long_read(&chunk->avail))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 298) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 299)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 300) start_bit = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 301) end_bit = chunk_size(chunk) >> order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 302) retry:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 303) start_bit = algo(chunk->bits, end_bit, start_bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 304) nbits, data, pool, chunk->start_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 305) if (start_bit >= end_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 306) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 307) remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 308) if (remain) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 309) remain = bitmap_clear_ll(chunk->bits, start_bit,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 310) nbits - remain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 311) BUG_ON(remain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 312) goto retry;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 313) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 314)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 315) addr = chunk->start_addr + ((unsigned long)start_bit << order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 316) size = nbits << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 317) atomic_long_sub(size, &chunk->avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 318) if (owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 319) *owner = chunk->owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 320) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 321) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 322) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 323) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 324) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 325) EXPORT_SYMBOL(gen_pool_alloc_algo_owner);
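
/*
 * Example of the owner round trip (sketch only; struct my_buf_info is an
 * invented type).  Whatever was passed as @owner to gen_pool_add_owner() for
 * the chunk that satisfied the allocation is handed back here, and again on
 * free via gen_pool_free_owner():
 *
 *	struct my_buf_info *info;
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo_owner(pool, len, gen_pool_first_fit,
 *					 NULL, (void **)&info);
 *	if (!addr)
 *		return -ENOMEM;
 *	...
 *	gen_pool_free_owner(pool, addr, len, (void **)&info);
 */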
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 326)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 327) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 328) * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 329) * @pool: pool to allocate from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 330) * @size: number of bytes to allocate from the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 331) * @dma: DMA-view physical address return value. Use %NULL if unneeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 332) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 333) * Allocate the requested number of bytes from the specified pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 334) * Uses the pool allocation function (with first-fit algorithm by default).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 335) * Can not be used in NMI handler on architectures without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 336) * NMI-safe cmpxchg implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 337) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 338) * Return: virtual address of the allocated memory, or %NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 339) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 340) void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 341) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 342) return gen_pool_dma_alloc_algo(pool, size, dma, pool->algo, pool->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 343) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 344) EXPORT_SYMBOL(gen_pool_dma_alloc);
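
/*
 * For instance (sketch only; assumes the chunk was added with its physical
 * address, e.g. via gen_pool_add_virt(), so that the returned @dma value is
 * meaningful):
 *
 *	dma_addr_t dma;
 *	void *va = gen_pool_dma_alloc(pool, 512, &dma);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	(va is then used by the CPU, dma is programmed into the device)
 */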
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 345)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 346) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 347) * gen_pool_dma_alloc_algo - allocate special memory from the pool for DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 348) * usage with the given pool algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 349) * @pool: pool to allocate from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 350) * @size: number of bytes to allocate from the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 351) * @dma: DMA-view physical address return value. Use %NULL if unneeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 352) * @algo: algorithm passed from caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 353) * @data: data passed to algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 354) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 355) * Allocate the requested number of bytes from the specified pool. Uses the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 356) * given pool allocation function. Can not be used in NMI handler on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 357) * architectures without NMI-safe cmpxchg implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 358) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 359) * Return: virtual address of the allocated memory, or %NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 360) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 361) void *gen_pool_dma_alloc_algo(struct gen_pool *pool, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 362) dma_addr_t *dma, genpool_algo_t algo, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 363) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 364) unsigned long vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 365)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 366) if (!pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 367) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 368)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 369) vaddr = gen_pool_alloc_algo(pool, size, algo, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 370) if (!vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 371) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 373) if (dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 374) *dma = gen_pool_virt_to_phys(pool, vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 376) return (void *)vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 377) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 378) EXPORT_SYMBOL(gen_pool_dma_alloc_algo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 379)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 380) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 381) * gen_pool_dma_alloc_align - allocate special memory from the pool for DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 382) * usage with the given alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 383) * @pool: pool to allocate from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 384) * @size: number of bytes to allocate from the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 385) * @dma: DMA-view physical address return value. Use %NULL if unneeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 386) * @align: alignment in bytes for starting address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 387) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 388) * Allocate the requested number of bytes from the specified pool, with the given
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 389) * alignment restriction. Can not be used in NMI handler on architectures
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 390) * without NMI-safe cmpxchg implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 391) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 392) * Return: virtual address of the allocated memory, or %NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 393) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 394) void *gen_pool_dma_alloc_align(struct gen_pool *pool, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 395) dma_addr_t *dma, int align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 396) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 397) struct genpool_data_align data = { .align = align };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 398)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 399) return gen_pool_dma_alloc_algo(pool, size, dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 400) gen_pool_first_fit_align, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 401) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 402) EXPORT_SYMBOL(gen_pool_dma_alloc_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 403)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 404) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 405) * gen_pool_dma_zalloc - allocate special zeroed memory from the pool for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 406) * DMA usage
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 407) * @pool: pool to allocate from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 408) * @size: number of bytes to allocate from the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 409) * @dma: DMA-view physical address return value. Use %NULL if unneeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 410) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 411) * Allocate the requested number of zeroed bytes from the specified pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 412) * Uses the pool allocation function (with first-fit algorithm by default).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 413) * Can not be used in NMI handler on architectures without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 414) * NMI-safe cmpxchg implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 415) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 416) * Return: virtual address of the allocated zeroed memory, or %NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 417) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 418) void *gen_pool_dma_zalloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 419) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 420) return gen_pool_dma_zalloc_algo(pool, size, dma, pool->algo, pool->data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 421) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 422) EXPORT_SYMBOL(gen_pool_dma_zalloc);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 423)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 424) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 425) * gen_pool_dma_zalloc_algo - allocate special zeroed memory from the pool for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 426) * DMA usage with the given pool algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 427) * @pool: pool to allocate from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 428) * @size: number of bytes to allocate from the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 429) * @dma: DMA-view physical address return value. Use %NULL if unneeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 430) * @algo: algorithm passed from caller
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 431) * @data: data passed to algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 432) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 433) * Allocate the requested number of zeroed bytes from the specified pool. Uses
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 434) * the given pool allocation function. Can not be used in NMI handler on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 435) * architectures without NMI-safe cmpxchg implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 436) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 437) * Return: virtual address of the allocated zeroed memory, or %NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 438) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 439) void *gen_pool_dma_zalloc_algo(struct gen_pool *pool, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 440) dma_addr_t *dma, genpool_algo_t algo, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 441) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 442) void *vaddr = gen_pool_dma_alloc_algo(pool, size, dma, algo, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 443)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 444) if (vaddr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 445) memset(vaddr, 0, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 446)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 447) return vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 448) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 449) EXPORT_SYMBOL(gen_pool_dma_zalloc_algo);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 450)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 451) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 452) * gen_pool_dma_zalloc_align - allocate special zeroed memory from the pool for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 453) * DMA usage with the given alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 454) * @pool: pool to allocate from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 455) * @size: number of bytes to allocate from the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 456) * @dma: DMA-view physical address return value. Use %NULL if unneeded.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 457) * @align: alignment in bytes for starting address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 458) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 459) * Allocate the requested number of zeroed bytes from the specified pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 460) * with the given alignment restriction. Can not be used in NMI handler on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 461) * architectures without NMI-safe cmpxchg implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 462) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 463) * Return: virtual address of the allocated zeroed memory, or %NULL on failure
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 464) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 465) void *gen_pool_dma_zalloc_align(struct gen_pool *pool, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 466) dma_addr_t *dma, int align)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 467) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 468) struct genpool_data_align data = { .align = align };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 469)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 470) return gen_pool_dma_zalloc_algo(pool, size, dma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 471) gen_pool_first_fit_align, &data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 472) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 473) EXPORT_SYMBOL(gen_pool_dma_zalloc_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 474)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 475) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 476) * gen_pool_free_owner - free allocated special memory back to the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 477) * @pool: pool to free to
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 478) * @addr: starting address of memory to free back to pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 479) * @size: size in bytes of memory to free
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 480) * @owner: private data stashed at gen_pool_add() time
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 481) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 482) * Free previously allocated special memory back to the specified
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 483) * pool. Can not be used in NMI handler on architectures without
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 484) * NMI-safe cmpxchg implementation.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 485) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 486) void gen_pool_free_owner(struct gen_pool *pool, unsigned long addr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 487) void **owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 488) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 489) struct gen_pool_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 490) int order = pool->min_alloc_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 491) unsigned long start_bit, nbits, remain;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 492)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 493) #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 494) BUG_ON(in_nmi());
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 495) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 497) if (owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 498) *owner = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 499)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 500) nbits = (size + (1UL << order) - 1) >> order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 501) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 502) list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 503) if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 504) BUG_ON(addr + size - 1 > chunk->end_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 505) start_bit = (addr - chunk->start_addr) >> order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 506) remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 507) BUG_ON(remain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 508) size = nbits << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 509) atomic_long_add(size, &chunk->avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 510) if (owner)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 511) *owner = chunk->owner;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 512) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 513) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 514) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 515) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 516) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 517) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 518) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 519) EXPORT_SYMBOL(gen_pool_free_owner);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 520)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 521) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 522) * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 523) * @pool: the generic memory pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 524) * @func: func to call
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 525) * @data: additional data used by @func
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 526) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 527) * Call @func for every chunk of the generic memory pool. @func is
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 528) * called with the RCU read lock held, so it must not sleep.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 529) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 530) void gen_pool_for_each_chunk(struct gen_pool *pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 531) void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 532) void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 533) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 534) struct gen_pool_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 535)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 536) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 537) list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 538) func(pool, chunk, data);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 539) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 540) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 541) EXPORT_SYMBOL(gen_pool_for_each_chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 543) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 544) * gen_pool_has_addr - checks if an address falls within the range of a pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 545) * @pool: the generic memory pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 546) * @start: start address
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 547) * @size: size of the region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 548) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 549) * Check if the range of addresses falls within the specified pool. Returns
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 550) * true if the entire range is contained in the pool and false otherwise.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 551) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 552) bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 553) size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 554) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 555) bool found = false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 556) unsigned long end = start + size - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 557) struct gen_pool_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 558)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 559) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 560) list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 561) if (start >= chunk->start_addr && start <= chunk->end_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 562) if (end <= chunk->end_addr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 563) found = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 564) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 565) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 566) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 567) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 568) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 569) return found;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 570) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 571) EXPORT_SYMBOL(gen_pool_has_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 572)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 573) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 574) * gen_pool_avail - get available free space of the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 575) * @pool: pool to get available free space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 576) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 577) * Return available free space of the specified pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 578) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 579) size_t gen_pool_avail(struct gen_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 580) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 581) struct gen_pool_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 582) size_t avail = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 583)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 584) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 585) list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 586) avail += atomic_long_read(&chunk->avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 587) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 588) return avail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 589) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 590) EXPORT_SYMBOL_GPL(gen_pool_avail);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 592) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 593) * gen_pool_size - get size in bytes of memory managed by the pool
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 594) * @pool: pool to get size
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 595) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 596) * Return size in bytes of memory managed by the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 597) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 598) size_t gen_pool_size(struct gen_pool *pool)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 599) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 600) struct gen_pool_chunk *chunk;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 601) size_t size = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 603) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 604) list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 605) size += chunk_size(chunk);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 606) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 607) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 608) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 609) EXPORT_SYMBOL_GPL(gen_pool_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 610)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 611) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 612) * gen_pool_set_algo - set the allocation algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 613) * @pool: pool to change allocation algorithm
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 614) * @algo: custom algorithm function
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 615) * @data: additional data used by @algo
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 616) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 617) * Call @algo for each memory allocation in the pool.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 618) * If @algo is NULL, gen_pool_first_fit is used as the default
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 619) * memory allocation function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 620) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 621) void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 622) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 623) rcu_read_lock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 624)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 625) pool->algo = algo;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 626) if (!pool->algo)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 627) pool->algo = gen_pool_first_fit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 628)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 629) pool->data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 630)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 631) rcu_read_unlock();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 632) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 633) EXPORT_SYMBOL(gen_pool_set_algo);
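
/*
 * For example, a pool that fragments badly with the default first-fit
 * behaviour could be switched to best-fit right after creation (sketch
 * only):
 *
 *	pool = gen_pool_create(order, nid);
 *	if (pool)
 *		gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */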
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 634)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 635) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 636) * gen_pool_first_fit - find the first available region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 637) * of memory matching the size requirement (no alignment constraint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 638) * @map: The address to base the search on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 639) * @size: The bitmap size in bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 640) * @start: The bitnumber to start searching at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 641) * @nr: The number of zeroed bits we're looking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 642) * @data: additional data - unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 643) * @pool: pool to find the fit region memory from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 644) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 645) unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 646) unsigned long start, unsigned int nr, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 647) struct gen_pool *pool, unsigned long start_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 648) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 649) return bitmap_find_next_zero_area(map, size, start, nr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 651) EXPORT_SYMBOL(gen_pool_first_fit);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 652)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 653) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 654) * gen_pool_first_fit_align - find the first available region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 655) * of memory matching the size requirement (alignment constraint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 656) * @map: The address to base the search on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 657) * @size: The bitmap size in bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 658) * @start: The bitnumber to start searching at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 659) * @nr: The number of zeroed bits we're looking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 660) * @data: data for alignment
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 661) * @pool: pool to get order from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 662) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 663) unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 664) unsigned long start, unsigned int nr, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 665) struct gen_pool *pool, unsigned long start_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 666) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 667) struct genpool_data_align *alignment;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 668) unsigned long align_mask, align_off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 669) int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 670)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 671) alignment = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 672) order = pool->min_alloc_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 673) align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 674) align_off = (start_addr & (alignment->align - 1)) >> order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 675)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 676) return bitmap_find_next_zero_area_off(map, size, start, nr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 677) align_mask, align_off);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 678) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 679) EXPORT_SYMBOL(gen_pool_first_fit_align);
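
/*
 * This is normally used through gen_pool_alloc_algo() (or the
 * gen_pool_dma_alloc_align() helper above) with a struct genpool_data_align
 * describing the requested alignment, e.g. for a 256-byte aligned buffer
 * (sketch only):
 *
 *	struct genpool_data_align align_data = { .align = 256 };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, len, gen_pool_first_fit_align,
 *				   &align_data);
 */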
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 680)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 681) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 682) * gen_pool_fixed_alloc - reserve a specific region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 683) * @map: The address to base the search on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 684) * @size: The bitmap size in bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 685) * @start: The bitnumber to start searching at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 686) * @nr: The number of zeroed bits we're looking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 687) * @data: data describing the offset of the fixed region (struct genpool_data_fixed)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 688) * @pool: pool to get order from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 689) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 690) unsigned long gen_pool_fixed_alloc(unsigned long *map, unsigned long size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 691) unsigned long start, unsigned int nr, void *data,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 692) struct gen_pool *pool, unsigned long start_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 693) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 694) struct genpool_data_fixed *fixed_data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 695) int order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 696) unsigned long offset_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 697) unsigned long start_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 698)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 699) fixed_data = data;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 700) order = pool->min_alloc_order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 701) offset_bit = fixed_data->offset >> order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 702) if (WARN_ON(fixed_data->offset & ((1UL << order) - 1)))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 703) return size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 704)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 705) start_bit = bitmap_find_next_zero_area(map, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 706) start + offset_bit, nr, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 707) if (start_bit != offset_bit)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 708) start_bit = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 709) return start_bit;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 710) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 711) EXPORT_SYMBOL(gen_pool_fixed_alloc);
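
/*
 * Example: reserving the region starting 1 KiB into the pool (sketch only;
 * the offset must be a multiple of the pool's minimum allocation size, and
 * a return value of 0 means the requested region was not free):
 *
 *	struct genpool_data_fixed fixed_data = { .offset = SZ_1K };
 *	unsigned long addr;
 *
 *	addr = gen_pool_alloc_algo(pool, len, gen_pool_fixed_alloc,
 *				   &fixed_data);
 */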
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 712)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 713) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 714) * gen_pool_first_fit_order_align - find the first available region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 715) * of memory matching the size requirement. The region will be aligned
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 716) * to the order of the size specified.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 717) * @map: The address to base the search on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 718) * @size: The bitmap size in bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 719) * @start: The bitnumber to start searching at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 720) * @nr: The number of zeroed bits we're looking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 721) * @data: additional data - unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 722) * @pool: pool to find the fit region memory from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 723) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 724) unsigned long gen_pool_first_fit_order_align(unsigned long *map,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 725) unsigned long size, unsigned long start,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 726) unsigned int nr, void *data, struct gen_pool *pool,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 727) unsigned long start_addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 728) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 729) unsigned long align_mask = roundup_pow_of_two(nr) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 730)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 731) return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 732) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 733) EXPORT_SYMBOL(gen_pool_first_fit_order_align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 734)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 735) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 736) * gen_pool_best_fit - find the best fitting region of memory
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 737) * matching the size requirement (no alignment constraint)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 738) * @map: The address to base the search on
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 739) * @size: The bitmap size in bits
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 740) * @start: The bitnumber to start searching at
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 741) * @nr: The number of zeroed bits we're looking for
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 742) * @data: additional data - unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 743) * @pool: pool to find the fit region memory from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 744) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 745) * Iterate over the bitmap to find the smallest free region
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 746) * from which we can allocate the memory.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 747) */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool, unsigned long start_addr)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		unsigned long next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
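
/*
 * Usage sketch (illustrative only, not part of this file's API): best fit
 * can be made the pool-wide policy with gen_pool_set_algo(), or requested
 * for a single allocation with gen_pool_alloc_algo().  The pool is assumed
 * to have been created and populated elsewhere.
 *
 *	addr = gen_pool_alloc_algo(pool, SZ_4K, gen_pool_best_fit, NULL);
 *	if (!addr)
 *		return -ENOMEM;		(no free region of at least 4 KiB)
 */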

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);
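
/*
 * Usage sketch (illustrative only, not part of this file's API): a consumer
 * that knows which device registered the pool can look it up by name;
 * passing NULL matches only the unnamed pool on that device.  "sram" is a
 * hypothetical pool name.
 *
 *	struct gen_pool *pool = gen_pool_get(dev, "sram");
 *	if (!pool)
 *		return -EPROBE_DEFER;
 */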

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 *
 * Returns the pool on success, or an ERR_PTR()-encoded error on failure.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
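
/*
 * Usage sketch (illustrative only, not part of this file's API): a typical
 * probe() creates a managed pool, hands it a region of device memory and
 * never needs to destroy it explicitly.  The mapping, physical address and
 * size below are hypothetical.
 *
 *	pool = devm_gen_pool_create(&pdev->dev, ilog2(64), NUMA_NO_NODE, NULL);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *
 *	ret = gen_pool_add_virt(pool, (unsigned long)virt, phys, SZ_64K,
 *				NUMA_NO_NODE);
 *	if (ret)
 *		return ret;
 */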

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
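
/*
 * Usage sketch (illustrative only, not part of this file's API): given a
 * hypothetical consumer binding such as
 *
 *	sram = <&mysram_pool>;
 *
 * the consumer driver resolves the pool behind the phandle with:
 *
 *	pool = of_gen_pool_get(dev->of_node, "sram", 0);
 *	if (!pool)
 *		return -ENODEV;
 */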
#endif /* CONFIG_OF */