// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
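
/*
 * Typical use, as a hedged sketch rather than a drop-in recipe (the device
 * pointer, pool name, block size, and alignment below are illustrative, and
 * error handling is minimal):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t handle;
 *	void *buf;
 *
 *	pool = dma_pool_create("example", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &handle);
 *	if (!buf) {
 *		dma_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *	... use buf (CPU) and handle (device) for DMA ...
 *	dma_pool_free(pool, buf, handle);
 *	dma_pool_destroy(pool);
 */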

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;	/* list of the pool's dma_pages */
	spinlock_t lock;		/* protects page_list and page state */
	size_t size;			/* block size, rounded up to 'align' */
	struct device *dev;		/* device that will be doing the DMA */
	size_t allocation;		/* bytes per dma_alloc_coherent() call */
	size_t boundary;		/* blocks never cross this power of two */
	char name[32];			/* pool name, for diagnostics */
	struct list_head pools;		/* node in the device's dma_pools list */
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;	/* node in the pool's page_list */
	void *vaddr;			/* kernel virtual address of the page */
	dma_addr_t dma;			/* DMA address of the page */
	unsigned int in_use;		/* blocks allocated from this page */
	unsigned int offset;		/* first free block; >= allocation if full */
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);
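
/*
 * For reference, the "pools" sysfs file produced above emits one header line
 * and one line per pool.  A plausible read (pool names and counts here are
 * made up, but the columns follow the format string in show_pools()) would
 * look like:
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   16 2048  8
 *	buffer-512          3   32  512  4
 *
 * i.e. name, blocks in use, total blocks, block size, and pages.
 */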

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy(), or within dma_pool_create()
	 * when one invocation fails in device_create_file() while a second
	 * invocation assumes the sysfs file has already been created
	 * (admittedly a short window, but it exists).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
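
/*
 * As an illustrative (not prescriptive) example of the @boundary argument:
 * a driver whose hardware forbids a descriptor from crossing a 4KiB address
 * boundary might create its pool as
 *
 *	pool = dma_pool_create("desc", dev, 256, 256, 4096);
 *
 * Every block returned by dma_pool_alloc() is then 256-byte aligned and
 * fits entirely within one 4KiB-aligned window.
 */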

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
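
/*
 * A worked example of the free-list encoding above, under assumed values
 * (size = 96, boundary = 256, allocation = 4096): the first 4 bytes of each
 * free block store the offset of the next free block, so the chain built
 * here is
 *
 *	0 -> 96 -> 256 -> 352 -> 512 -> ...
 *
 * A third block per 256-byte window would start at offset 192 and end at
 * 288, crossing the boundary, so the chain skips ahead to the next window
 * and the last 64 bytes of each window stay unused.  The loop ends once
 * 'offset' reaches 'allocation'; that terminal link value is how
 * dma_pool_alloc() recognises a page with no free blocks left.
 */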

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(gfpflags_allow_blocking(mem_flags));

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* a free block keeps its free-list link in the first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
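
/*
 * A hedged sketch of the alloc path from driver context (td_pool and
 * struct my_td are assumed names, not from any real driver).  Callers in
 * atomic context must pass a non-blocking GFP mask, since dma_pool_alloc()
 * may otherwise sleep in pool_alloc_page():
 *
 *	dma_addr_t td_dma;
 *	struct my_td *td;
 *
 *	td = dma_pool_alloc(td_pool, GFP_ATOMIC, &td_dma);
 *	if (!td)
 *		return -ENOMEM;
 *	td->hw_next = cpu_to_le32(td_dma);
 *
 * The device is handed the DMA address (td_dma) while the CPU writes
 * through the virtual address (td); both name the same block.
 */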

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
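
/*
 * An illustrative pairing with dma_pool_alloc() (names assumed, not from
 * any real driver): dma_pool_free() takes the pool spinlock with IRQs
 * disabled and never sleeps, so freeing from a device's completion
 * interrupt handler is fine.
 *
 *	static void my_complete_td(struct my_dev *mydev, struct my_td *td,
 *				   dma_addr_t td_dma)
 *	{
 *		dma_pool_free(mydev->td_pool, td, td_dma);
 *	}
 */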

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
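
/*
 * Typical managed use, sketched under assumed names (my_probe() and
 * struct my_dev are illustrative): the pool needs no explicit cleanup,
 * since devres destroys it when the driver detaches.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct my_dev *mydev = ...;
 *
 *		mydev->td_pool = dmam_pool_create("td", &pdev->dev,
 *						  sizeof(struct my_td), 16, 0);
 *		if (!mydev->td_pool)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */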

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);