// SPDX-License-Identifier: GPL-2.0-only
/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 * DMA windows will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 */
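
/*
 * Usage sketch (illustrative only, not compiled as part of this file): a
 * platform whose bus can only reach part of RAM registers each affected
 * device, typically at probe time, and supplies a needs_bounce() callback.
 * The callback, device and window size below are hypothetical assumptions,
 * not taken from a real platform:
 *
 *	static int frob_needs_bounce(struct device *dev, dma_addr_t addr,
 *				     size_t size)
 *	{
 *		return (addr + size) > (PHYS_OFFSET + SZ_64M);
 *	}
 *
 *	static int frob_probe(struct platform_device *pdev)
 *	{
 *		return dmabounce_register_dev(&pdev->dev, 512, 4096,
 *					      frob_needs_bounce);
 *	}
 *
 * dmabounce_unregister_dev(&pdev->dev) undoes this on remove.  Drivers for
 * the device keep using the ordinary streaming DMA API; bouncing happens
 * behind the dma_map_ops installed by dmabounce_register_dev().
 */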

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%zu, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

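	/*
	 * Requests bigger than both pools get no pool at all; for those the
	 * fallback below is a one-off dma_alloc_coherent() allocation.
	 */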
	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%zu)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

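		/*
		 * (mask + 1) & ~mask is the largest size a contiguous mask
		 * can cover: e.g. mask 0x00ffffff gives limit 0x01000000
		 * (16MB), while a full 32-bit mask wraps to 0 and the size
		 * check below is skipped.
		 */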
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#zx "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS(device_info->map_op_count++);

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return DMA_MAPPING_ERROR;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %zu\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
				size_t size, enum dma_data_direction dir,
				unsigned long attrs)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * See if a buffer address is in an 'unsafe' range.  If it is, allocate
 * a 'safe' buffer and copy the unsafe buffer into it, then substitute
 * the safe buffer for the unsafe one (basically move the buffer from
 * an unsafe area to a safe one).
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_MAPPING_ERROR;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_MAPPING_ERROR;
	}

	return map_single(dev, page_address(page) + offset, size, dir, attrs);
}
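
/*
 * Driver-side view, for illustration (the device and buffer names are
 * hypothetical): a call such as
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 * on a dmabounce-registered device ends up in dmabounce_map_page() above.
 * If needs_bounce() says no, the original address is returned after a
 * cache sync; if it says yes, the data is copied into a safe buffer and
 * the safe buffer's DMA address is returned, so the driver sees no
 * difference.
 */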

/*
 * See if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer (basically return things back to the way they
 * should be).
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%zu,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir, attrs);
}

static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %zu\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.dma_supported(dev, dma_mask);
}

static const struct dma_map_ops dmabounce_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= dmabounce_dma_supported,
};

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

err_destroy:
	dma_pool_destroy(device_info->small.pool);
err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");