// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006 SUSE Linux Products GmbH
 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

/*
 * Managed DMA API
 */
struct dma_devres {
        size_t          size;
        void            *vaddr;
        dma_addr_t      dma_handle;
        unsigned long   attrs;
};

static void dmam_release(struct device *dev, void *res)
{
        struct dma_devres *this = res;

        dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
                       this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
        struct dma_devres *this = res, *match = match_data;

        if (this->vaddr == match->vaddr) {
                WARN_ON(this->size != match->size ||
                        this->dma_handle != match->dma_handle);
                return 1;
        }
        return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent(). Frees the memory and removes the matching
 * devres entry so the allocation is not freed again on driver detach.
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle)
{
        struct dma_devres match_data = { size, vaddr, dma_handle };

        dma_free_coherent(dev, size, vaddr, dma_handle);
        WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs(). Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                       gfp_t gfp, unsigned long attrs)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;
        dr->attrs = attrs;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
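
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical driver probe (pdev is that driver's device) allocating a
 * descriptor ring through the managed API above, so the buffer is torn
 * down automatically on driver detach:
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dmam_alloc_attrs(&pdev->dev, SZ_4K, &ring_dma, GFP_KERNEL, 0);
 *      if (!ring)
 *              return -ENOMEM;
 *      (no explicit free is needed; dmam_release() runs on driver detach)
 */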

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
                          const struct dma_map_ops *ops)
{
        if (likely(!ops))
                return true;
#ifdef CONFIG_DMA_OPS_BYPASS
        if (dev->dma_ops_bypass)
                return min_not_zero(mask, dev->bus_dma_limit) >=
                        dma_direct_get_required_mask(dev);
#endif
        return false;
}


/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
                                    const struct dma_map_ops *ops)
{
        return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
                                  const struct dma_map_ops *ops)
{
        return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                size_t offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return DMA_MAPPING_ERROR;

        if (dma_map_direct(dev, ops))
                addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
        else
                addr = ops->map_page(dev, page, offset, size, dir, attrs);
        debug_dma_map_page(dev, page, offset, size, dir, addr);

        return addr;
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_unmap_page(dev, addr, size, dir, attrs);
        else if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
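
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * streaming mapping of one page for a hypothetical receive path, including
 * the mandatory dma_mapping_error() check before the address is used:
 *
 *      dma_addr_t addr;
 *
 *      addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
 *                                DMA_FROM_DEVICE, 0);
 *      if (dma_mapping_error(dev, addr))
 *              return -ENOMEM;
 *      ...start the transfer and wait for completion...
 *      dma_unmap_page_attrs(dev, addr, PAGE_SIZE, DMA_FROM_DEVICE, 0);
 */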

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
                     enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        int ents;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return 0;

        if (dma_map_direct(dev, ops))
                ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
        else
                ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir,
                        unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (dma_map_direct(dev, ops))
                dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
        else if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);
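
/*
 * Usage sketch (illustrative only, not part of the original file): mapping
 * a scatterlist for a hypothetical request. The returned count may be
 * smaller than nents if entries were merged, and the unmap must be called
 * with the original nents, not the returned value:
 *
 *      int mapped;
 *
 *      mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 *      if (!mapped)
 *              return -EIO;
 *      ...program the device with the mapped entries...
 *      dma_unmap_sg_attrs(dev, sgl, nents, DMA_TO_DEVICE, 0);
 */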

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr = DMA_MAPPING_ERROR;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return DMA_MAPPING_ERROR;

        /* Don't allow RAM to be mapped */
        if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
                return DMA_MAPPING_ERROR;

        if (dma_map_direct(dev, ops))
                addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
        else if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

        debug_dma_map_resource(dev, phys_addr, size, dir, addr);
        return addr;
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (!dma_map_direct(dev, ops) && ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_resource);
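
/*
 * Usage sketch (illustrative only, not part of the original file): mapping
 * an MMIO region, e.g. a peripheral FIFO described by a hypothetical
 * platform resource "res", for slave DMA; note that RAM is deliberately
 * rejected above:
 *
 *      dma_addr_t dma;
 *
 *      dma = dma_map_resource(dev, res->start, resource_size(res),
 *                             DMA_BIDIRECTIONAL, 0);
 *      if (dma_mapping_error(dev, dma))
 *              return -EIO;
 *      ...
 *      dma_unmap_resource(dev, dma, resource_size(res),
 *                         DMA_BIDIRECTIONAL, 0);
 */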

void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
        else if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_single_for_device(dev, addr, size, dir);
        else if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(dma_sync_single_for_device);
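
/*
 * Usage sketch (illustrative only, not part of the original file): handing
 * a long-lived DMA_FROM_DEVICE streaming buffer back and forth between CPU
 * and device without unmapping it in between:
 *
 *      dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 *      ...CPU reads the data the device wrote...
 *      dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
 *      ...device may DMA into the buffer again...
 */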

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scatterlist. This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
                                size, attrs);
        if (!ops->get_sgtable)
                return -ENXIO;
        return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
        if (force_dma_unencrypted(dev))
                prot = pgprot_decrypted(prot);
        if (dev_is_dma_coherent(dev))
                return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
        if (attrs & DMA_ATTR_WRITE_COMBINE)
                return pgprot_writecombine(prot);
#endif
        if (attrs & DMA_ATTR_SYS_CACHE_ONLY ||
            attrs & DMA_ATTR_SYS_CACHE_ONLY_NWA)
                return pgprot_syscached(prot);
        return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_can_mmap(dev);
        return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space. The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
                                attrs);
        if (!ops->mmap)
                return -ENXIO;
        return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
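
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * hypothetical driver's mmap() file operation exposing a coherent buffer
 * that was allocated earlier with dma_alloc_attrs() (foo_dev and its
 * fields are made up for the example):
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_dev *foo = file->private_data;
 *
 *              return dma_mmap_attrs(foo->dev, vma, foo->cpu_addr,
 *                                    foo->dma_addr, foo->size, 0);
 *      }
 */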

u64 dma_get_required_mask(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_get_required_mask(dev);
        if (ops->get_required_mask)
                return ops->get_required_mask(dev);

        /*
         * We require every DMA ops implementation to at least support a 32-bit
         * DMA mask (and use bounce buffering if that isn't supported in
         * hardware). As the direct mapping code has its own routine to
         * actually report an optimal mask we default to 32-bit here as that
         * is the right thing for most IOMMUs, and at least not actively
         * harmful in general.
         */
        return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        WARN_ON_ONCE(!dev->coherent_dma_mask);

        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        /* let the implementation decide on the zone to allocate from: */
        flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

        if (dma_alloc_direct(dev, ops))
                cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
        else if (ops->alloc)
                cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        else
                return NULL;

        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
        /*
         * On non-coherent platforms which implement DMA-coherent buffers via
         * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
         * this far in IRQ context is a) at risk of a BUG_ON() or trying to
         * sleep on some machines, and b) an indication that the driver is
         * probably misusing the coherent API anyway.
         */
        WARN_ON(irqs_disabled());

        if (!cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        if (dma_alloc_direct(dev, ops))
                dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
        else if (ops->free)
                ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);

struct page *dma_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        struct page *page;

        if (WARN_ON_ONCE(!dev->coherent_dma_mask))
                return NULL;
        if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
                return NULL;

        size = PAGE_ALIGN(size);
        if (dma_alloc_direct(dev, ops))
                page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
        else if (ops->alloc_pages)
                page = ops->alloc_pages(dev, size, dma_handle, dir, gfp);
        else
                return NULL;

        debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);

        return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

void dma_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        size = PAGE_ALIGN(size);
        debug_dma_unmap_page(dev, dma_handle, size, dir);

        if (dma_alloc_direct(dev, ops))
                dma_direct_free_pages(dev, size, page, dma_handle, dir);
        else if (ops->free_pages)
                ops->free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);

void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *vaddr;

        if (!ops || !ops->alloc_noncoherent) {
                struct page *page;

                page = dma_alloc_pages(dev, size, dma_handle, dir, gfp);
                if (!page)
                        return NULL;
                return page_address(page);
        }

        size = PAGE_ALIGN(size);
        vaddr = ops->alloc_noncoherent(dev, size, dma_handle, dir, gfp);
        if (vaddr)
                debug_dma_map_page(dev, virt_to_page(vaddr), 0, size, dir,
                                   *dma_handle);
        return vaddr;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncoherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops || !ops->free_noncoherent) {
                dma_free_pages(dev, size, virt_to_page(vaddr), dma_handle, dir);
                return;
        }

        size = PAGE_ALIGN(size);
        debug_dma_unmap_page(dev, dma_handle, size, dir);
        ops->free_noncoherent(dev, size, vaddr, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncoherent);
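
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * non-coherent allocation where the caller does the cache maintenance
 * explicitly with the dma_sync_single_* helpers (data and len are made up
 * for the example):
 *
 *      void *buf;
 *      dma_addr_t dma;
 *
 *      buf = dma_alloc_noncoherent(dev, SZ_64K, &dma, DMA_TO_DEVICE,
 *                                  GFP_KERNEL);
 *      if (!buf)
 *              return -ENOMEM;
 *      memcpy(buf, data, len);
 *      dma_sync_single_for_device(dev, dma, len, DMA_TO_DEVICE);
 *      ...kick off the transfer...
 *      dma_free_noncoherent(dev, SZ_64K, buf, dma, DMA_TO_DEVICE);
 */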

int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        /*
         * ->dma_supported sets the bypass flag, so we must always call
         * into the method here unless the device is truly direct mapped.
         */
        if (!ops)
                return dma_direct_supported(dev, mask);
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}
EXPORT_SYMBOL(dma_supported);

#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
void arch_dma_set_mask(struct device *dev, u64 mask);
#else
#define arch_dma_set_mask(dev, mask)    do { } while (0)
#endif

int dma_set_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        arch_dma_set_mask(dev, mask);
        *dev->dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dma_supported(dev, mask))
                return -EIO;

        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
#endif
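
/*
 * Usage sketch (illustrative only, not part of the original file): the
 * usual probe-time pattern using the dma_set_mask_and_coherent() helper,
 * which wraps the two setters above, declaring 64-bit support and falling
 * back to 32 bits if the wider mask is rejected:
 *
 *      ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
 *      if (ret)
 *              ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *      if (ret)
 *              return ret;
 */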

size_t dma_max_mapping_size(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        size_t size = SIZE_MAX;

        if (dma_map_direct(dev, ops))
                size = dma_direct_max_mapping_size(dev);
        else if (ops && ops->max_mapping_size)
                size = ops->max_mapping_size(dev);

        return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_map_direct(dev, ops))
                return dma_direct_need_sync(dev, dma_addr);
        return ops->sync_single_for_cpu || ops->sync_single_for_device;
}
EXPORT_SYMBOL_GPL(dma_need_sync);
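
/*
 * Usage sketch (illustrative only, not part of the original file): a hot
 * receive path that recycles buffers and skips cache maintenance entirely
 * when dma_need_sync() says the mapping does not require it:
 *
 *      if (dma_need_sync(dev, addr))
 *              dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);
 */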

unsigned long dma_get_merge_boundary(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops || !ops->get_merge_boundary)
                return 0;       /* can't merge */

        return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);