// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/highmem.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#include <asm/system_info.h>
#include <xen/swiotlb-xen.h>

#include "dma.h"
#include "mm.h"

struct arm_dma_alloc_args {
        struct device *dev;
        size_t size;
        gfp_t gfp;
        pgprot_t prot;
        const void *caller;
        bool want_vaddr;
        int coherent_flag;
};

struct arm_dma_free_args {
        struct device *dev;
        size_t size;
        void *cpu_addr;
        struct page *page;
        bool want_vaddr;
};

#define NORMAL          0
#define COHERENT        1

struct arm_dma_allocator {
        void *(*alloc)(struct arm_dma_alloc_args *args,
                       struct page **ret_page);
        void (*free)(struct arm_dma_free_args *args);
};

struct arm_dma_buffer {
        struct list_head list;
        void *virt;
        struct arm_dma_allocator *allocator;
};

static LIST_HEAD(arm_dma_bufs);
static DEFINE_SPINLOCK(arm_dma_bufs_lock);

static struct arm_dma_buffer *arm_dma_buffer_find(void *virt)
{
        struct arm_dma_buffer *buf, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&arm_dma_bufs_lock, flags);
        list_for_each_entry(buf, &arm_dma_bufs, list) {
                if (buf->virt == virt) {
                        list_del(&buf->list);
                        found = buf;
                        break;
                }
        }
        spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
        return found;
}

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
                size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
                size_t, enum dma_data_direction);
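
/*
 * Example (an illustrative sketch, not part of this file): a driver using
 * the streaming API hands a buffer to the device and later takes it back.
 * "my_dev", "page" and "len" are hypothetical.  On a non-coherent device
 * the map step reaches arm_dma_map_page() below, which cleans the cache
 * before the hardware may touch the memory:
 *
 *      dma_addr_t dma = dma_map_page(my_dev, page, 0, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(my_dev, dma))
 *              return -ENOMEM;
 *      (device owns the buffer: start the transfer, wait for completion)
 *      dma_unmap_page(my_dev, dma, len, DMA_TO_DEVICE);
 *      (CPU owns the buffer again and may read or write it)
 */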

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_page_cpu_to_dev(page, offset, size, dir);
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static dma_addr_t arm_coherent_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page's streaming-mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
                                      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));

        __dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        unsigned int offset = handle & (PAGE_SIZE - 1);
        struct page *page = pfn_to_page(dma_to_pfn(dev, handle - offset));

        __dma_page_cpu_to_dev(page, offset, size, dir);
}
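
/*
 * Example (an illustrative sketch): when the CPU needs to look at a buffer
 * that remains mapped for the device, the sync calls above move ownership
 * back and forth without unmapping.  "my_dev", "dma" and "len" are
 * hypothetical:
 *
 *      dma_sync_single_for_cpu(my_dev, dma, len, DMA_FROM_DEVICE);
 *      (CPU may now read the data the device wrote)
 *      dma_sync_single_for_device(my_dev, dma, len, DMA_FROM_DEVICE);
 *      (device may DMA into the buffer again)
 */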

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
static int arm_dma_supported(struct device *dev, u64 mask)
{
        unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);

        /*
         * Translate the device's DMA mask to a PFN limit.  This
         * PFN number includes the page which we can DMA to.
         */
        return dma_to_pfn(dev, mask) >= max_dma_pfn;
}
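
/*
 * Worked example (assuming no DMA offset, i.e. dma_to_pfn() is a plain
 * shift by PAGE_SHIFT): a 24-bit device passes mask = 0x00ffffff, which
 * translates to PFN 0xfff with 4 KiB pages.  The mask is reported as
 * supported only if 0xfff >= min(max_pfn - 1, arm_dma_pfn_limit), i.e.
 * only if the highest page the kernel may hand out for DMA lies within
 * the first 16 MiB of memory.
 */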

const struct dma_map_ops arm_dma_ops = {
        .alloc                  = arm_dma_alloc,
        .free                   = arm_dma_free,
        .alloc_pages            = dma_direct_alloc_pages,
        .free_pages             = dma_direct_free_pages,
        .mmap                   = arm_dma_mmap,
        .get_sgtable            = arm_dma_get_sgtable,
        .map_page               = arm_dma_map_page,
        .unmap_page             = arm_dma_unmap_page,
        .map_sg                 = arm_dma_map_sg,
        .unmap_sg               = arm_dma_unmap_sg,
        .map_resource           = dma_direct_map_resource,
        .sync_single_for_cpu    = arm_dma_sync_single_for_cpu,
        .sync_single_for_device = arm_dma_sync_single_for_device,
        .sync_sg_for_cpu        = arm_dma_sync_sg_for_cpu,
        .sync_sg_for_device     = arm_dma_sync_sg_for_device,
        .dma_supported          = arm_dma_supported,
        .get_required_mask      = dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, unsigned long attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
        dma_addr_t handle, unsigned long attrs);
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
        unsigned long attrs);

const struct dma_map_ops arm_coherent_dma_ops = {
        .alloc                  = arm_coherent_dma_alloc,
        .free                   = arm_coherent_dma_free,
        .alloc_pages            = dma_direct_alloc_pages,
        .free_pages             = dma_direct_free_pages,
        .mmap                   = arm_coherent_dma_mmap,
        .get_sgtable            = arm_dma_get_sgtable,
        .map_page               = arm_coherent_dma_map_page,
        .map_sg                 = arm_dma_map_sg,
        .map_resource           = dma_direct_map_resource,
        .dma_supported          = arm_dma_supported,
        .get_required_mask      = dma_direct_get_required_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{
        /*
         * Ensure that the allocated pages are zeroed, and that any data
         * lurking in the kernel direct-mapped region is invalidated.
         */
        if (PageHighMem(page)) {
                phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
                phys_addr_t end = base + size;

                while (size > 0) {
                        void *ptr = kmap_atomic(page);

                        memset(ptr, 0, PAGE_SIZE);
                        if (coherent_flag != COHERENT)
                                dmac_flush_range(ptr, ptr + PAGE_SIZE);
                        kunmap_atomic(ptr);
                        page++;
                        size -= PAGE_SIZE;
                }
                if (coherent_flag != COHERENT)
                        outer_flush_range(base, end);
        } else {
                void *ptr = page_address(page);

                memset(ptr, 0, size);
                if (coherent_flag != COHERENT) {
                        dmac_flush_range(ptr, ptr + size);
                        outer_flush_range(__pa(ptr), __pa(ptr) + size);
                }
        }
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
                                       gfp_t gfp, int coherent_flag)
{
        unsigned long order = get_order(size);
        struct page *page, *p, *e;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /*
         * Now split the huge page and free the excess pages
         */
        split_page(page, order);
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                __free_page(p);

        __dma_clear_buffer(page, size, coherent_flag);

        return page;
}
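
/*
 * Worked example for the trimming above: a 20 KiB (five page) request
 * gives order = get_order(20 KiB) = 3, so alloc_pages() returns a block
 * of eight pages.  split_page() turns it into eight independent pages
 * and the loop frees pages 5..7, leaving exactly the five pages needed.
 */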

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
        struct page *e = page + (size >> PAGE_SHIFT);

        while (page < e) {
                __free_page(page);
                page++;
        }
}

static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page,
                                     const void *caller, bool want_vaddr,
                                     int coherent_flag, gfp_t gfp);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                  pgprot_t prot, struct page **ret_page,
                                  const void *caller, bool want_vaddr);

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static struct gen_pool *atomic_pool __ro_after_init;

static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);
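
/*
 * Example: the pool size can be overridden on the kernel command line,
 * e.g. "coherent_pool=1M" or "coherent_pool=512K"; memparse() accepts
 * the usual K/M/G suffixes.
 */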

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
        pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
        gfp_t gfp = GFP_KERNEL | GFP_DMA;
        struct page *page;
        void *ptr;

        atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
        if (!atomic_pool)
                goto out;
        /*
         * The atomic pool is only used for non-coherent allocations
         * so we must pass NORMAL for coherent_flag.
         */
        if (dev_get_cma_area(NULL))
                ptr = __alloc_from_contiguous(NULL, atomic_pool_size, prot,
                                              &page, atomic_pool_init, true, NORMAL,
                                              GFP_KERNEL);
        else
                ptr = __alloc_remap_buffer(NULL, atomic_pool_size, gfp, prot,
                                           &page, atomic_pool_init, true);
        if (ptr) {
                int ret;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)ptr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto destroy_genpool;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  NULL);
                pr_info("DMA: preallocated %zu KiB pool for atomic coherent allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }

destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}

/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);
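
/*
 * Illustrative note: with gen_pool_first_fit_order_align an allocation
 * is placed at an offset aligned to its size rounded up to a power of
 * two, so for instance an 8 KiB request served by __alloc_from_pool()
 * below comes back aligned to an 8 KiB boundary relative to the start
 * of the pool.
 */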

struct dma_contig_early_reserve {
        phys_addr_t base;
        unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
        dma_mmu_remap[dma_mmu_remap_num].base = base;
        dma_mmu_remap[dma_mmu_remap_num].size = size;
        dma_mmu_remap_num++;
}

void __init dma_contiguous_remap(void)
{
        int i;

        for (i = 0; i < dma_mmu_remap_num; i++) {
                phys_addr_t start = dma_mmu_remap[i].base;
                phys_addr_t end = start + dma_mmu_remap[i].size;
                struct map_desc map;
                unsigned long addr;

                if (end > arm_lowmem_limit)
                        end = arm_lowmem_limit;
                if (start >= end)
                        continue;

                map.pfn = __phys_to_pfn(start);
                map.virtual = __phys_to_virt(start);
                map.length = end - start;
                map.type = MT_MEMORY_DMA_READY;

                /*
                 * Clear previous low-memory mapping to ensure that the
                 * TLB does not see any conflicting entries, then flush
                 * the TLB of the old entries before creating new mappings.
                 *
                 * This ensures that any speculatively loaded TLB entries
                 * (even though they may be rare) cannot cause any problems,
                 * and ensures that this code is architecturally compliant.
                 */
                for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
                     addr += PMD_SIZE)
                        pmd_clear(pmd_off_k(addr));

                flush_tlb_kernel_range(__phys_to_virt(start),
                                       __phys_to_virt(end));

                iotable_init(&map, 1);
        }
}

static int __dma_update_pte(pte_t *pte, unsigned long addr, void *data)
{
        struct page *page = virt_to_page(addr);
        pgprot_t prot = *(pgprot_t *)data;

        set_pte_ext(pte, mk_pte(page, prot), 0);
        return 0;
}

static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
{
        unsigned long start = (unsigned long) page_address(page);
        unsigned end = start + size;

        apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
        flush_tlb_kernel_range(start, end);
}

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
                                  pgprot_t prot, struct page **ret_page,
                                  const void *caller, bool want_vaddr)
{
        struct page *page;
        void *ptr = NULL;

        /*
         * __alloc_remap_buffer is only called when the device is
         * non-coherent
         */
        page = __dma_alloc_buffer(dev, size, gfp, NORMAL);
        if (!page)
                return NULL;
        if (!want_vaddr)
                goto out;

        ptr = dma_common_contiguous_remap(page, size, prot, caller);
        if (!ptr) {
                __dma_free_buffer(page, size);
                return NULL;
        }

out:
        *ret_page = page;
        return ptr;
}

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
        }

        return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
        return gen_pool_has_addr(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}
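
/*
 * Illustrative sketch: the pool above backs coherent allocations made
 * with a non-blocking gfp mask, e.g. from interrupt context ("my_dev"
 * and "dma" are hypothetical):
 *
 *      void *cpu = dma_alloc_coherent(my_dev, SZ_4K, &dma, GFP_ATOMIC);
 *      ...
 *      dma_free_coherent(my_dev, SZ_4K, cpu, dma);
 *
 * The free path recognises pool memory via __in_atomic_pool() and hands
 * it back with gen_pool_free() instead of returning it to the page
 * allocator.
 */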

static void *__alloc_from_contiguous(struct device *dev, size_t size,
                                     pgprot_t prot, struct page **ret_page,
                                     const void *caller, bool want_vaddr,
                                     int coherent_flag, gfp_t gfp)
{
        unsigned long order = get_order(size);
        size_t count = size >> PAGE_SHIFT;
        struct page *page;
        void *ptr = NULL;

        page = dma_alloc_from_contiguous(dev, count, order, gfp & __GFP_NOWARN);
        if (!page)
                return NULL;

        __dma_clear_buffer(page, size, coherent_flag);

        if (!want_vaddr)
                goto out;

        if (PageHighMem(page)) {
                ptr = dma_common_contiguous_remap(page, size, prot, caller);
                if (!ptr) {
                        dma_release_from_contiguous(dev, page, count);
                        return NULL;
                }
        } else {
                __dma_remap(page, size, prot);
                ptr = page_address(page);
        }

out:
        *ret_page = page;
        return ptr;
}

static void __free_from_contiguous(struct device *dev, struct page *page,
                                   void *cpu_addr, size_t size, bool want_vaddr)
{
        if (want_vaddr) {
                if (PageHighMem(page))
                        dma_common_free_remap(cpu_addr, size);
                else
                        __dma_remap(page, size, PAGE_KERNEL);
        }
        dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
}

static inline pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot)
{
        prot = (attrs & DMA_ATTR_WRITE_COMBINE) ?
                        pgprot_writecombine(prot) :
                        pgprot_dmacoherent(prot);
        return prot;
}
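
/*
 * Example (an illustrative sketch, "my_dev" is hypothetical): a driver
 * that prefers a write-combining mapping, e.g. for a frame buffer, can
 * request it with an attribute that ends up here as attrs:
 *
 *      void *cpu = dma_alloc_attrs(my_dev, size, &dma, GFP_KERNEL,
 *                                  DMA_ATTR_WRITE_COMBINE);
 *
 * Without the attribute the default pgprot_dmacoherent() protection is
 * used, for both the kernel remap and any userspace mmap.
 */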

static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
                                   struct page **ret_page)
{
        struct page *page;

        /* __alloc_simple_buffer is only called when the device is coherent */
        page = __dma_alloc_buffer(dev, size, gfp, COHERENT);
        if (!page)
                return NULL;

        *ret_page = page;
        return page_address(page);
}

static void *simple_allocator_alloc(struct arm_dma_alloc_args *args,
                                    struct page **ret_page)
{
        return __alloc_simple_buffer(args->dev, args->size, args->gfp,
                                     ret_page);
}

static void simple_allocator_free(struct arm_dma_free_args *args)
{
        __dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator simple_allocator = {
        .alloc = simple_allocator_alloc,
        .free = simple_allocator_free,
};

static void *cma_allocator_alloc(struct arm_dma_alloc_args *args,
                                 struct page **ret_page)
{
        return __alloc_from_contiguous(args->dev, args->size, args->prot,
                                       ret_page, args->caller,
                                       args->want_vaddr, args->coherent_flag,
                                       args->gfp);
}

static void cma_allocator_free(struct arm_dma_free_args *args)
{
        __free_from_contiguous(args->dev, args->page, args->cpu_addr,
                               args->size, args->want_vaddr);
}

static struct arm_dma_allocator cma_allocator = {
        .alloc = cma_allocator_alloc,
        .free = cma_allocator_free,
};

static void *pool_allocator_alloc(struct arm_dma_alloc_args *args,
                                  struct page **ret_page)
{
        return __alloc_from_pool(args->size, ret_page);
}

static void pool_allocator_free(struct arm_dma_free_args *args)
{
        __free_from_pool(args->cpu_addr, args->size);
}

static struct arm_dma_allocator pool_allocator = {
        .alloc = pool_allocator_alloc,
        .free = pool_allocator_free,
};

static void *remap_allocator_alloc(struct arm_dma_alloc_args *args,
                                   struct page **ret_page)
{
        return __alloc_remap_buffer(args->dev, args->size, args->gfp,
                                    args->prot, ret_page, args->caller,
                                    args->want_vaddr);
}

static void remap_allocator_free(struct arm_dma_free_args *args)
{
        if (args->want_vaddr)
                dma_common_free_remap(args->cpu_addr, args->size);

        __dma_free_buffer(args->page, args->size);
}

static struct arm_dma_allocator remap_allocator = {
        .alloc = remap_allocator_alloc,
        .free = remap_allocator_free,
};

static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                         gfp_t gfp, pgprot_t prot, bool is_coherent,
                         unsigned long attrs, const void *caller)
{
        u64 mask = min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
        struct page *page = NULL;
        void *addr;
        bool allowblock, cma;
        struct arm_dma_buffer *buf;
        struct arm_dma_alloc_args args = {
                .dev = dev,
                .size = PAGE_ALIGN(size),
                .gfp = gfp,
                .prot = prot,
                .caller = caller,
                .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
                .coherent_flag = is_coherent ? COHERENT : NORMAL,
        };

#ifdef CONFIG_DMA_API_DEBUG
        u64 limit = (mask + 1) & ~mask;

        if (limit && size >= limit) {
                dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
                         size, mask);
                return NULL;
        }
#endif

        buf = kzalloc(sizeof(*buf),
                      gfp & ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM));
        if (!buf)
                return NULL;

        if (mask < 0xffffffffULL)
                gfp |= GFP_DMA;

        /*
         * Following is a work-around (a.k.a. hack) to prevent pages
         * with __GFP_COMP being passed to split_page() which cannot
         * handle them.  The real problem is that this flag probably
         * should be 0 on ARM as it is not supported on this
         * platform; see CONFIG_HUGETLBFS.
         */
        gfp &= ~(__GFP_COMP);
        args.gfp = gfp;

        *handle = DMA_MAPPING_ERROR;
        allowblock = gfpflags_allow_blocking(gfp);
        cma = allowblock ? dev_get_cma_area(dev) : false;

        if (cma)
                buf->allocator = &cma_allocator;
        else if (is_coherent)
                buf->allocator = &simple_allocator;
        else if (allowblock)
                buf->allocator = &remap_allocator;
        else
                buf->allocator = &pool_allocator;

        addr = buf->allocator->alloc(&args, &page);

        if (page) {
                unsigned long flags;

                *handle = pfn_to_dma(dev, page_to_pfn(page));
                buf->virt = args.want_vaddr ? addr : page;

                spin_lock_irqsave(&arm_dma_bufs_lock, flags);
                list_add(&buf->list, &arm_dma_bufs);
                spin_unlock_irqrestore(&arm_dma_bufs_lock, flags);
        } else {
                kfree(buf);
        }

        return args.want_vaddr ? addr : page;
}
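
/*
 * The allocator selection above, in the order it is checked (an
 * illustrative summary, not an additional code path):
 *
 *      blocking gfp and a CMA area      -> cma_allocator
 *      coherent device                  -> simple_allocator (plain pages)
 *      blocking gfp, non-coherent       -> remap_allocator (uncached remap)
 *      non-blocking (e.g. GFP_ATOMIC)   -> pool_allocator (atomic pool)
 */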

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
                    gfp_t gfp, unsigned long attrs)
{
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);

        return __dma_alloc(dev, size, handle, gfp, prot, false,
                           attrs, __builtin_return_address(0));
}

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
        dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
{
        return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
                           attrs, __builtin_return_address(0));
}

static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          unsigned long attrs)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = vma_pages(vma);
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_pfn(dev, dma_addr);
        unsigned long off = vma->vm_pgoff;

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
static int arm_coherent_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                                 unsigned long attrs)
{
        return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}

int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                 void *cpu_addr, dma_addr_t dma_addr, size_t size,
                 unsigned long attrs)
{
        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
        return __arm_dma_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 799)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 800) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 801) * Free a buffer as defined by the above mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 802) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 803) static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 804) dma_addr_t handle, unsigned long attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 805) bool is_coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 806) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 807) struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 808) struct arm_dma_buffer *buf;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 809) struct arm_dma_free_args args = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 810) .dev = dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 811) .size = PAGE_ALIGN(size),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 812) .cpu_addr = cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 813) .page = page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 814) .want_vaddr = ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 815) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 816)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 817) buf = arm_dma_buffer_find(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 818) if (WARN(!buf, "Freeing invalid buffer %p\n", cpu_addr))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 819) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 820)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 821) buf->allocator->free(&args);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 822) kfree(buf);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 823) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 824)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 825) void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 826) dma_addr_t handle, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 827) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 828) __arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 829) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 830)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 831) static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 832) dma_addr_t handle, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 833) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 834) __arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 835) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 836)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 837) int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 838) void *cpu_addr, dma_addr_t handle, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 839) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 840) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 841) unsigned long pfn = dma_to_pfn(dev, handle);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 842) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 843) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 844)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 845) /* If the PFN is not valid, we do not have a struct page */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 846) if (!pfn_valid(pfn))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 847) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 848)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 849) page = pfn_to_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 850)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 851) ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 852) if (unlikely(ret))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 853) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 854)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 855) sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 856) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 857) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 858)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 859) static void dma_cache_maint_page(struct page *page, unsigned long offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 860) size_t size, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 861) void (*op)(const void *, size_t, int))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 862) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 863) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 864) size_t left = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 866) pfn = page_to_pfn(page) + offset / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 867) offset %= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 868)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 869) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 870) * A single sg entry may refer to multiple physically contiguous
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 871) * pages. But we still need to process highmem pages individually.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 872) * If highmem is not configured then the bulk of this loop gets
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 873) * optimized out.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 874) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 875) do {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 876) size_t len = left;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 877) void *vaddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 878)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 879) page = pfn_to_page(pfn);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 880)
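		/*
		 * Highmem pages may lack a permanent kernel mapping: on
		 * non-aliasing VIPT caches map the page temporarily with
		 * kmap_atomic(); on aliasing caches only perform the op if
		 * the page already has a kernel mapping (kmap_high_get()).
		 */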
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 881) if (PageHighMem(page)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 882) if (len + offset > PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 883) len = PAGE_SIZE - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 884)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 885) if (cache_is_vipt_nonaliasing()) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 886) vaddr = kmap_atomic(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 887) op(vaddr + offset, len, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 888) kunmap_atomic(vaddr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 889) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 890) vaddr = kmap_high_get(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 891) if (vaddr) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 892) op(vaddr + offset, len, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 893) kunmap_high(page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 894) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 895) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 896) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 897) vaddr = page_address(page) + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 898) op(vaddr, len, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 899) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 900) offset = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 901) pfn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 902) left -= len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 903) } while (left);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 904) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 905)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 906) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 907) * Make an area consistent for devices.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 908) * Note: Drivers should NOT use this function directly, as it will break
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 909) * platforms with CONFIG_DMABOUNCE.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 910) * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 911) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 912) static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 913) size_t size, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 914) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 915) phys_addr_t paddr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 916)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 917) dma_cache_maint_page(page, off, size, dir, dmac_map_area);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 918)
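	/*
	 * For DMA_FROM_DEVICE, invalidate the outer cache so stale lines are
	 * not hit once the device has written the buffer; otherwise clean it
	 * so the device sees the CPU's most recent data.
	 */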
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 919) paddr = page_to_phys(page) + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 920) if (dir == DMA_FROM_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 921) outer_inv_range(paddr, paddr + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 922) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 923) outer_clean_range(paddr, paddr + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 924) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 925) /* FIXME: non-speculating: flush on bidirectional mappings? */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 926) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 927)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 928) static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 929) size_t size, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 930) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 931) phys_addr_t paddr = page_to_phys(page) + off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 932)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 933) /* FIXME: non-speculating: not required */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 934) /* in any case, don't bother invalidating if DMA to device */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 935) if (dir != DMA_TO_DEVICE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 936) outer_inv_range(paddr, paddr + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 937)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 938) dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 939) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 940)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 941) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 942) * Mark the D-cache clean for these pages to avoid extra flushing.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 943) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 944) if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 945) unsigned long pfn;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 946) size_t left = size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 948) pfn = page_to_pfn(page) + off / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 949) off %= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 950) if (off) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 951) pfn++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 952) left -= PAGE_SIZE - off;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 954) while (left >= PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 955) page = pfn_to_page(pfn++);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 956) set_bit(PG_dcache_clean, &page->flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 957) left -= PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 958) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 959) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 960) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 961)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 962) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 963) * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 964) * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 965) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 966) * @nents: number of buffers to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 967) * @dir: DMA transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 968) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 969) * Map a set of buffers described by scatterlist in streaming mode for DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 970) * This is the scatter-gather version of the dma_map_single interface.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 971)  * Here the scatter-gather list elements are each tagged with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 972)  * appropriate DMA address and length. They are obtained via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 973) * sg_dma_{address,length}.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 974) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 975) * Device ownership issues as mentioned for dma_map_single are the same
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 976) * here.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 977) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 978) int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 979) enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 980) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 981) const struct dma_map_ops *ops = get_dma_ops(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 982) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 983) int i, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 984)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 985) for_each_sg(sg, s, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 986) #ifdef CONFIG_NEED_SG_DMA_LENGTH
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 987) s->dma_length = s->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 988) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 989) s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 990) s->length, dir, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 991) if (dma_mapping_error(dev, s->dma_address))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 992) goto bad_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 993) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 994) return nents;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 995)
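	/*
	 * Unwind the entries mapped so far (the first i) and return 0 to
	 * signal failure.
	 */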
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 996) bad_mapping:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 997) for_each_sg(sg, s, i, j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 998) ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 999) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1000) }
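
/*
 * Typical driver-side usage (illustrative sketch only): map with
 * dma_map_sg(), walk the mapped entries with for_each_sg() reading
 * sg_dma_address()/sg_dma_len() to program the hardware, and release
 * them afterwards with dma_unmap_sg(), passing the original nents.
 */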
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1002) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1003) * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1004) * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1005) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1006) * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1007) * @dir: DMA transfer direction (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1008) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1009) * Unmap a set of streaming mode DMA translations. Again, CPU access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1010) * rules concerning calls here are the same as for dma_unmap_single().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1011) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1012) void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1013) enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1014) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1015) const struct dma_map_ops *ops = get_dma_ops(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1016) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1017)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1018) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1020) for_each_sg(sg, s, nents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1021) ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1022) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1023)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1024) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1025)  * arm_dma_sync_sg_for_cpu - sync a set of SG buffers for CPU access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1026) * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1027) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1028)  * @nents: number of buffers to sync (returned from dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1029) * @dir: DMA transfer direction (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1030) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1031) void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1032) int nents, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1033) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1034) const struct dma_map_ops *ops = get_dma_ops(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1035) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1036) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1037)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1038) for_each_sg(sg, s, nents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1039) ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1040) dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1041) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1042)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1043) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1044)  * arm_dma_sync_sg_for_device - sync a set of SG buffers for device access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1045) * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1046) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1047)  * @nents: number of buffers to sync (returned from dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1048) * @dir: DMA transfer direction (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1049) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1050) void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1051) int nents, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1052) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1053) const struct dma_map_ops *ops = get_dma_ops(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1054) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1055) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1057) for_each_sg(sg, s, nents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1058) ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1059) dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1060) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1061)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1062) static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1063) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1064) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1065) * When CONFIG_ARM_LPAE is set, physical address can extend above
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1066) * 32-bits, which then can't be addressed by devices that only support
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1067) * 32-bit DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1068) * Use the generic dma-direct / swiotlb ops code in that case, as that
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1069) * handles bounce buffering for us.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1070) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1071) if (IS_ENABLED(CONFIG_ARM_LPAE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1072) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1073) return coherent ? &arm_coherent_dma_ops : &arm_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1074) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1076) #ifdef CONFIG_ARM_DMA_USE_IOMMU
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1078) static int __dma_info_to_prot(enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1079) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1080) int prot = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1082) if (attrs & DMA_ATTR_PRIVILEGED)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1083) prot |= IOMMU_PRIV;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1084)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1085) switch (dir) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1086) case DMA_BIDIRECTIONAL:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1087) return prot | IOMMU_READ | IOMMU_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1088) case DMA_TO_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1089) return prot | IOMMU_READ;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1090) case DMA_FROM_DEVICE:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1091) return prot | IOMMU_WRITE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1092) default:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1093) return prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1094) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1095) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1096)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1097) /* IOMMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1098)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1099) static int extend_iommu_mapping(struct dma_iommu_mapping *mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1101) static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1102) size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1103) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1104) unsigned int order = get_order(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1105) unsigned int align = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1106) unsigned int count, start;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1107) size_t mapping_size = mapping->bits << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1108) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1109) dma_addr_t iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1110) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1112) if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1113) order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1115) count = PAGE_ALIGN(size) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1116) align = (1 << order) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1117)
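	/*
	 * Scan each extension bitmap for a run of 'count' free bits; 'align'
	 * is the alignment mask passed to bitmap_find_next_zero_area().
	 */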
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1118) spin_lock_irqsave(&mapping->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1119) for (i = 0; i < mapping->nr_bitmaps; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1120) start = bitmap_find_next_zero_area(mapping->bitmaps[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1121) mapping->bits, 0, count, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1123) if (start > mapping->bits)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1124) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1126) bitmap_set(mapping->bitmaps[i], start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1127) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1128) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1129)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1130) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1131) * No unused range found. Try to extend the existing mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1132) * and perform a second attempt to reserve an IO virtual
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1133) * address range of size bytes.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1134) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1135) if (i == mapping->nr_bitmaps) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1136) if (extend_iommu_mapping(mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1137) spin_unlock_irqrestore(&mapping->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1138) return DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1139) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1140)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1141) start = bitmap_find_next_zero_area(mapping->bitmaps[i],
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1142) mapping->bits, 0, count, align);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1143)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1144) if (start > mapping->bits) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1145) spin_unlock_irqrestore(&mapping->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1146) return DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1149) bitmap_set(mapping->bitmaps[i], start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1150) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1151) spin_unlock_irqrestore(&mapping->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1152)
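	/*
	 * Each bitmap covers 'mapping_size' bytes of IO virtual address
	 * space; combine the bitmap index with the bit offset inside it to
	 * form the final IO virtual address.
	 */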
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1153) iova = mapping->base + (mapping_size * i);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1154) iova += start << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1155)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1156) return iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1157) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1158)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1159) static inline void __free_iova(struct dma_iommu_mapping *mapping,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1160) dma_addr_t addr, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1161) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1162) unsigned int start, count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1163) size_t mapping_size = mapping->bits << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1164) unsigned long flags;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1165) dma_addr_t bitmap_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1166) u32 bitmap_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1167)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1168) if (!size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1169) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1170)
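	/*
	 * Work out which extension bitmap the address falls into and the
	 * base IO virtual address covered by that bitmap.
	 */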
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1171) bitmap_index = (u32) (addr - mapping->base) / (u32) mapping_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1172) BUG_ON(addr < mapping->base || bitmap_index > mapping->extensions);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1173)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1174) bitmap_base = mapping->base + mapping_size * bitmap_index;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1176) start = (addr - bitmap_base) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1177)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1178) if (addr + size > bitmap_base + mapping_size) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1179) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1180) * The address range to be freed reaches into the iova
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1181) * range of the next bitmap. This should not happen as
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1182) * we don't allow this in __alloc_iova (at the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1183) * moment).
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1184) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1185) BUG();
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1186) } else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1187) count = size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1188)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1189) spin_lock_irqsave(&mapping->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1190) bitmap_clear(mapping->bitmaps[bitmap_index], start, count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1191) spin_unlock_irqrestore(&mapping->lock, flags);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1192) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1193)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1194) /* We'll try 2M, 1M, 64K, and finally 4K; array must end with 0! */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1195) static const int iommu_order_array[] = { 9, 8, 4, 0 };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1196)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1197) static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1198) gfp_t gfp, unsigned long attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1199) int coherent_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1200) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1201) struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1202) int count = size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1203) int array_size = count * sizeof(struct page *);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1204) int i = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1205) int order_idx = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1207) if (array_size <= PAGE_SIZE)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1208) pages = kzalloc(array_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1209) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1210) pages = vzalloc(array_size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1211) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1212) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1213)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1214) 	if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1216) unsigned long order = get_order(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1217) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1218)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1219) page = dma_alloc_from_contiguous(dev, count, order,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1220) gfp & __GFP_NOWARN);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1221) if (!page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1222) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1223)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1224) __dma_clear_buffer(page, size, coherent_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1225)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1226) for (i = 0; i < count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1227) pages[i] = page + i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1229) return pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1232) /* Go straight to 4K chunks if caller says it's OK. */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1233) if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1234) order_idx = ARRAY_SIZE(iommu_order_array) - 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1236) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1237) 	 * IOMMU can map any pages, so highmem can also be used here
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1238) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1239) gfp |= __GFP_NOWARN | __GFP_HIGHMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1240)
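	/*
	 * Fill the pages[] array in the largest chunks available, dropping
	 * to a smaller order whenever a high-order allocation fails or the
	 * remaining count no longer justifies it.
	 */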
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1241) while (count) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1242) int j, order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1243)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1244) order = iommu_order_array[order_idx];
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1245)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1246) /* Drop down when we get small */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1247) if (__fls(count) < order) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1248) order_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1249) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1250) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1251)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1252) if (order) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1253) /* See if it's easy to allocate a high-order chunk */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1254) pages[i] = alloc_pages(gfp | __GFP_NORETRY, order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1255)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1256) /* Go down a notch at first sign of pressure */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1257) if (!pages[i]) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1258) order_idx++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1259) continue;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1260) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1261) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1262) pages[i] = alloc_pages(gfp, 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1263) if (!pages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1264) goto error;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1265) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1267) if (order) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1268) split_page(pages[i], order);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1269) j = 1 << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1270) while (--j)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1271) pages[i + j] = pages[i] + j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1272) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1273)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1274) __dma_clear_buffer(pages[i], PAGE_SIZE << order, coherent_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1275) i += 1 << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1276) count -= 1 << order;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1277) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1278)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1279) return pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1280) error:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1281) while (i--)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1282) if (pages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1283) __free_pages(pages[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1284) kvfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1285) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1286) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1287)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1288) static int __iommu_free_buffer(struct device *dev, struct page **pages,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1289) size_t size, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1291) int count = size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1292) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1294) if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1295) dma_release_from_contiguous(dev, pages[0], count);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1296) } else {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1297) for (i = 0; i < count; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1298) if (pages[i])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1299) __free_pages(pages[i], 0);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1300) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1301)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1302) kvfree(pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1303) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1304) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1305)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1306) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1307) * Create a mapping in device IO address space for specified pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1308) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1309) static dma_addr_t
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1310) __iommu_create_mapping(struct device *dev, struct page **pages, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1311) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1312) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1313) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1314) unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1315) dma_addr_t dma_addr, iova;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1316) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1317)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1318) dma_addr = __alloc_iova(mapping, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1319) if (dma_addr == DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1320) return dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1322) iova = dma_addr;
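	/*
	 * Merge runs of physically contiguous pages so that each run can be
	 * mapped with a single iommu_map() call.
	 */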
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1323) for (i = 0; i < count; ) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1324) int ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1325)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1326) unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1327) phys_addr_t phys = page_to_phys(pages[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1328) unsigned int len, j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1329)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1330) for (j = i + 1; j < count; j++, next_pfn++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1331) if (page_to_pfn(pages[j]) != next_pfn)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1332) break;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1333)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1334) len = (j - i) << PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1335) ret = iommu_map(mapping->domain, iova, phys, len,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1336) __dma_info_to_prot(DMA_BIDIRECTIONAL, attrs));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1337) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1338) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1339) iova += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1340) i = j;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1341) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1342) return dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1343) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1344) iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1345) __free_iova(mapping, dma_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1346) return DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1347) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1348)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1349) static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1350) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1351) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1352)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1353) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1354) 	 * Add the optional in-page offset from the iova to the size and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1355) 	 * align the result to the page size.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1356) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1357) size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1358) iova &= PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1359)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1360) iommu_unmap(mapping->domain, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1361) __free_iova(mapping, iova, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1362) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1363) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1364)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1365) static struct page **__atomic_get_pages(void *addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1366) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1367) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1368) phys_addr_t phys;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1369)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1370) phys = gen_pool_virt_to_phys(atomic_pool, (unsigned long)addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1371) page = phys_to_page(phys);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1372)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1373) return (struct page **)page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1374) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1375)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1376) static struct page **__iommu_get_pages(void *cpu_addr, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1377) {
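	/*
	 * Atomic-pool buffers are physically contiguous, so the struct page
	 * pointer itself is used as the cookie; with
	 * DMA_ATTR_NO_KERNEL_MAPPING the caller was handed the pages array
	 * directly; otherwise look up the array recorded by
	 * dma_common_pages_remap().
	 */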
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1378) if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1379) return __atomic_get_pages(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1380)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1381) if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1382) return cpu_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1383)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1384) return dma_common_find_pages(cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1385) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1386)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1387) static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1388) dma_addr_t *handle, int coherent_flag,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1389) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1390) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1391) struct page *page;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1392) void *addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1393)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1394) if (coherent_flag == COHERENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1395) addr = __alloc_simple_buffer(dev, size, gfp, &page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1396) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1397) addr = __alloc_from_pool(size, &page);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1398) if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1399) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1400)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1401) *handle = __iommu_create_mapping(dev, &page, size, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1402) if (*handle == DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1403) goto err_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1404)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1405) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1406)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1407) err_mapping:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1408) __free_from_pool(addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1409) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1410) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1411)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1412) static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1413) dma_addr_t handle, size_t size, int coherent_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1414) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1415) __iommu_remove_mapping(dev, handle, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1416) if (coherent_flag == COHERENT)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1417) __dma_free_buffer(virt_to_page(cpu_addr), size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1418) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1419) __free_from_pool(cpu_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1420) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1421)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1422) static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1423) dma_addr_t *handle, gfp_t gfp, unsigned long attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1424) int coherent_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1425) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1426) pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1427) struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1428) void *addr = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1429)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1430) *handle = DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1431) size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1432)
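	/*
	 * Coherent requests and atomic (non-blocking) contexts take the
	 * simple path: a single contiguous buffer or the atomic pool.
	 */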
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1433) if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1434) return __iommu_alloc_simple(dev, size, gfp, handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1435) coherent_flag, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1436)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1437) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1438) 	 * The following is a work-around (a.k.a. hack) to prevent pages
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1439) 	 * with __GFP_COMP being passed to split_page(), which cannot
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1440) * handle them. The real problem is that this flag probably
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1441) * should be 0 on ARM as it is not supported on this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1442) * platform; see CONFIG_HUGETLBFS.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1443) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1444) gfp &= ~(__GFP_COMP);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1445)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1446) pages = __iommu_alloc_buffer(dev, size, gfp, attrs, coherent_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1447) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1448) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1449)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1450) *handle = __iommu_create_mapping(dev, pages, size, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1451) if (*handle == DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1452) goto err_buffer;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1453)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1454) if (attrs & DMA_ATTR_NO_KERNEL_MAPPING)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1455) return pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1456)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1457) addr = dma_common_pages_remap(pages, size, prot,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1458) __builtin_return_address(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1459) if (!addr)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1460) goto err_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1461)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1462) return addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1463)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1464) err_mapping:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1465) __iommu_remove_mapping(dev, *handle, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1466) err_buffer:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1467) __iommu_free_buffer(dev, pages, size, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1468) return NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1469) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1470)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1471) static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1472) dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1473) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1474) return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1475) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1476)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1477) static void *arm_coherent_iommu_alloc_attrs(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1478) dma_addr_t *handle, gfp_t gfp, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1479) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1480) return __arm_iommu_alloc_attrs(dev, size, handle, gfp, attrs, COHERENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1481) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1482)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1483) static int __arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1484) void *cpu_addr, dma_addr_t dma_addr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1485) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1486) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1487) struct page **pages = __iommu_get_pages(cpu_addr, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1488) unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1489) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1490)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1491) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1492) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1493)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1494) if (vma->vm_pgoff >= nr_pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1495) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1496)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1497) err = vm_map_pages(vma, pages, nr_pages);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1498) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1499) pr_err("Remapping memory failed: %d\n", err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1500)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1501) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1502) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1503) static int arm_iommu_mmap_attrs(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1504) struct vm_area_struct *vma, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1505) dma_addr_t dma_addr, size_t size, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1506) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1507) vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1508)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1509) return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1510) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1511)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1512) static int arm_coherent_iommu_mmap_attrs(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1513) struct vm_area_struct *vma, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1514) dma_addr_t dma_addr, size_t size, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1515) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1516) return __arm_iommu_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1517) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1518)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1519) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1520)  * Free a buffer as defined by the above mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1521) * Must not be called with IRQs disabled.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1522) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1523) static void __arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1524) dma_addr_t handle, unsigned long attrs, int coherent_flag)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1525) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1526) struct page **pages;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1527) size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1528)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1529) if (coherent_flag == COHERENT || __in_atomic_pool(cpu_addr, size)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1530) __iommu_free_atomic(dev, cpu_addr, handle, size, coherent_flag);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1531) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1532) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1533)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1534) pages = __iommu_get_pages(cpu_addr, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1535) if (!pages) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1536) WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1537) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1538) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1539)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1540) if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1541) dma_common_free_remap(cpu_addr, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1542)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1543) __iommu_remove_mapping(dev, handle, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1544) __iommu_free_buffer(dev, pages, size, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1545) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1546)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1547) static void arm_iommu_free_attrs(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1548) void *cpu_addr, dma_addr_t handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1549) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1550) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1551) __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, NORMAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1552) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1553)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1554) static void arm_coherent_iommu_free_attrs(struct device *dev, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1555) void *cpu_addr, dma_addr_t handle, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1556) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1557) __arm_iommu_free_attrs(dev, size, cpu_addr, handle, attrs, COHERENT);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1558) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1559)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1560) static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1561) void *cpu_addr, dma_addr_t dma_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1562) size_t size, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1563) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1564) unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1565) struct page **pages = __iommu_get_pages(cpu_addr, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1566)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1567) if (!pages)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1568) return -ENXIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1569)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1570) return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1571) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1572) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1573)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1574) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1575) * Map a part of the scatter-gather list into contiguous io address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1576) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1577) static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1578) size_t size, dma_addr_t *handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1579) enum dma_data_direction dir, unsigned long attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1580) bool is_coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1581) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1582) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1583) dma_addr_t iova, iova_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1584) int ret = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1585) unsigned int count;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1586) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1587) int prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1588)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1589) size = PAGE_ALIGN(size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1590) *handle = DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1591)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1592) iova_base = iova = __alloc_iova(mapping, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1593) if (iova == DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1594) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1595)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1596) for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1597) phys_addr_t phys = page_to_phys(sg_page(s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1598) unsigned int len = PAGE_ALIGN(s->offset + s->length);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1599)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1600) if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1601) __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1602)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1603) prot = __dma_info_to_prot(dir, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1604)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1605) ret = iommu_map(mapping->domain, iova, phys, len, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1606) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1607) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1608) count += len >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1609) iova += len;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1610) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1611) *handle = iova_base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1612)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1613) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1614) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1615) iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1616) __free_iova(mapping, iova_base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1617) return ret;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1618) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1619)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1620) static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1621) enum dma_data_direction dir, unsigned long attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1622) bool is_coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1623) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1624) struct scatterlist *s = sg, *dma = sg, *start = sg;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1625) int i, count = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1626) unsigned int offset = s->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1627) unsigned int size = s->offset + s->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1628) unsigned int max = dma_get_max_seg_size(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1629)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1630) for (i = 1; i < nents; i++) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1631) s = sg_next(s);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1632)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1633) s->dma_address = DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1634) s->dma_length = 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1635)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1636) if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1637) if (__map_sg_chunk(dev, start, size, &dma->dma_address,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1638) dir, attrs, is_coherent) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1639) goto bad_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1640)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1641) dma->dma_address += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1642) dma->dma_length = size - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1643)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1644) size = offset = s->offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1645) start = s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1646) dma = sg_next(dma);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1647) count += 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1648) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1649) size += s->length;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1650) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1651) if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1652) is_coherent) < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1653) goto bad_mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1654)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1655) dma->dma_address += offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1656) dma->dma_length = size - offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1657)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1658) return count+1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1659)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1660) bad_mapping:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1661) for_each_sg(sg, s, count, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1662) __iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1663) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1664) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1665)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1666) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1667) * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1668) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1669) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1670) * @nents: number of buffers to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1671) * @dir: DMA transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1672) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1673) * Map a set of i/o coherent buffers described by scatterlist in streaming
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1674) * mode for DMA. The scatter gather list elements are merged together (if
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1675) * possible) and tagged with the appropriate dma address and length. They are
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1676) * obtained via sg_dma_{address,length}.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1677) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1678) static int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1679) int nents, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1680) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1681) return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1682) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1683)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1684) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1685) * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1686) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1687) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1688) * @nents: number of buffers to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1689) * @dir: DMA transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1690) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1691) * Map a set of buffers described by scatterlist in streaming mode for DMA.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1692) * The scatter gather list elements are merged together (if possible) and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1693) * tagged with the appropriate dma address and length. They are obtained via
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1694) * sg_dma_{address,length}.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1695) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1696) static int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1697) int nents, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1698) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1699) return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1700) }
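
/*
 * Illustrative sketch of the driver-side pattern behind these map_sg
 * callbacks: a driver maps its scatterlist through the generic DMA API,
 * which dispatches here once iommu_ops are installed on the device.  The
 * helper name program_hw_descriptor() is a placeholder.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		program_hw_descriptor(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */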
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1701)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1702) static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1703) int nents, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1704) unsigned long attrs, bool is_coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1705) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1706) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1707) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1708)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1709) for_each_sg(sg, s, nents, i) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1710) if (sg_dma_len(s))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1711) __iommu_remove_mapping(dev, sg_dma_address(s),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1712) sg_dma_len(s));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1713) if (!is_coherent && (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1714) __dma_page_dev_to_cpu(sg_page(s), s->offset,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1715) s->length, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1716) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1717) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1718)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1719) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1720) * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1721) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1722) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1723) * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1724) * @dir: DMA transfer direction (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1725) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1726) * Unmap a set of streaming mode DMA translations. Again, CPU access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1727) * rules concerning calls here are the same as for dma_unmap_single().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1728) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1729) static void arm_coherent_iommu_unmap_sg(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1730) struct scatterlist *sg, int nents, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1731) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1732) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1733) __iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1734) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1735)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1736) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1737) * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1738) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1739) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1740) * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1741) * @dir: DMA transfer direction (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1742) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1743) * Unmap a set of streaming mode DMA translations. Again, CPU access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1744) * rules concerning calls here are the same as for dma_unmap_single().
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1745) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1746) static void arm_iommu_unmap_sg(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1747) struct scatterlist *sg, int nents,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1748) enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1749) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1750) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1751) __iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1752) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1753)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1754) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1755) * arm_iommu_sync_sg_for_cpu - sync a set of SG buffers for CPU access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1756) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1757) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1758) * @nents: number of buffers to map (returned from dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1759) * @dir: DMA transfer direction (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1760) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1761) static void arm_iommu_sync_sg_for_cpu(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1762) struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1763) int nents, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1764) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1765) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1766) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1767)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1768) for_each_sg(sg, s, nents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1769) __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1770)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1771) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1772)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1773) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1774) * arm_iommu_sync_sg_for_device - sync a set of SG buffers for device access
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1775) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1776) * @sg: list of buffers
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1777) * @nents: number of buffers to map (returned from dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1778) * @dir: DMA transfer direction (same as was passed to dma_map_sg)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1779) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1780) static void arm_iommu_sync_sg_for_device(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1781) struct scatterlist *sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1782) int nents, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1783) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1784) struct scatterlist *s;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1785) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1786)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1787) for_each_sg(sg, s, nents, i)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1788) __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1789) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1790)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1791)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1792) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1793) * arm_coherent_iommu_map_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1794) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1795) * @page: page that buffer resides in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1796) * @offset: offset into page for start of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1797) * @size: size of buffer to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1798) * @dir: DMA transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1799) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1800) * Coherent IOMMU aware version of arm_dma_map_page()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1801) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1802) static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1803) unsigned long offset, size_t size, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1804) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1805) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1806) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1807) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1808) int ret, prot, len = PAGE_ALIGN(size + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1809)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1810) dma_addr = __alloc_iova(mapping, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1811) if (dma_addr == DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1812) return dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1813)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1814) prot = __dma_info_to_prot(dir, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1815)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1816) ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1817) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1818) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1819)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1820) return dma_addr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1821) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1822) __free_iova(mapping, dma_addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1823) return DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1824) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1825)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1826) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1827) * arm_iommu_map_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1828) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1829) * @page: page that buffer resides in
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1830) * @offset: offset into page for start of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1831) * @size: size of buffer to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1832) * @dir: DMA transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1833) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1834) * IOMMU aware version of arm_dma_map_page()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1835) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1836) static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1837) unsigned long offset, size_t size, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1838) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1839) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1840) if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1841) __dma_page_cpu_to_dev(page, offset, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1842)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1843) return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1844) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1845)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1846) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1847) * arm_coherent_iommu_unmap_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1848) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1849) * @handle: DMA address of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1850) * @size: size of buffer (same as passed to dma_map_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1851) * @dir: DMA transfer direction (same as passed to dma_map_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1852) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1853) * Coherent IOMMU aware version of arm_dma_unmap_page()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1854) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1855) static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1856) size_t size, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1857) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1858) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1859) dma_addr_t iova = handle & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1860) int offset = handle & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1861) int len = PAGE_ALIGN(size + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1862)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1863) if (!iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1864) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1865)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1866) iommu_unmap(mapping->domain, iova, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1867) __free_iova(mapping, iova, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1868) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1869)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1870) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1871) * arm_iommu_unmap_page
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1872) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1873) * @handle: DMA address of buffer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1874) * @size: size of buffer (same as passed to dma_map_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1875) * @dir: DMA transfer direction (same as passed to dma_map_page)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1876) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1877) * IOMMU aware version of arm_dma_unmap_page()
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1878) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1879) static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1880) size_t size, enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1881) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1882) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1883) dma_addr_t iova = handle & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1884) struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1885) int offset = handle & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1886) int len = PAGE_ALIGN(size + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1887)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1888) if (!iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1889) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1890)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1891) if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1892) __dma_page_dev_to_cpu(page, offset, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1893)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1894) iommu_unmap(mapping->domain, iova, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1895) __free_iova(mapping, iova, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1896) }
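
/*
 * Illustrative sketch of the streaming single-buffer pattern served by the
 * map_page/unmap_page callbacks above and the sync_single callbacks below,
 * as seen from a driver through the generic DMA API (the buffer lifetime
 * shown is a placeholder):
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... device DMAs into the buffer ...
 *	dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *	... CPU reads the received data ...
 *	dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *	... device DMAs again ...
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 */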
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1897)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1898) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1899) * arm_iommu_map_resource - map a device resource for DMA
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1900) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1901) * @phys_addr: physical address of resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1902) * @size: size of resource to map
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1903) * @dir: DMA transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1904) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1905) static dma_addr_t arm_iommu_map_resource(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1906) phys_addr_t phys_addr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1907) enum dma_data_direction dir, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1908) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1909) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1910) dma_addr_t dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1911) int ret, prot;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1912) phys_addr_t addr = phys_addr & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1913) unsigned int offset = phys_addr & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1914) size_t len = PAGE_ALIGN(size + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1915)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1916) dma_addr = __alloc_iova(mapping, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1917) if (dma_addr == DMA_MAPPING_ERROR)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1918) return dma_addr;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1919)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1920) prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1921)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1922) ret = iommu_map(mapping->domain, dma_addr, addr, len, prot);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1923) if (ret < 0)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1924) goto fail;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1925)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1926) return dma_addr + offset;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1927) fail:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1928) __free_iova(mapping, dma_addr, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1929) return DMA_MAPPING_ERROR;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1930) }
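
/*
 * Illustrative sketch: a DMA engine client maps a slave device's MMIO FIFO
 * through the generic API, which lands here and picks up the IOMMU_MMIO
 * protection above.  fifo_phys is a placeholder address:
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_resource(dev, fifo_phys, 4, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... program the controller with "dma" as the destination ...
 *	dma_unmap_resource(dev, dma, 4, DMA_TO_DEVICE, 0);
 */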
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1931)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1932) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1933) * arm_iommu_unmap_resource - unmap a device DMA resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1934) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1935) * @dma_handle: DMA address to resource
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1936) * @size: size of resource (same as was passed to dma_map_resource)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1937) * @dir: DMA transfer direction
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1938) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1939) static void arm_iommu_unmap_resource(struct device *dev, dma_addr_t dma_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1940) size_t size, enum dma_data_direction dir,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1941) unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1942) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1943) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1944) dma_addr_t iova = dma_handle & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1945) unsigned int offset = dma_handle & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1946) size_t len = PAGE_ALIGN(size + offset);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1947)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1948) if (!iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1949) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1950)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1951) iommu_unmap(mapping->domain, iova, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1952) __free_iova(mapping, iova, len);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1953) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1954)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1955) static void arm_iommu_sync_single_for_cpu(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1956) dma_addr_t handle, size_t size, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1957) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1958) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1959) dma_addr_t iova = handle & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1960) struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1961) unsigned int offset = handle & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1962)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1963) if (!iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1964) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1965)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1966) __dma_page_dev_to_cpu(page, offset, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1967) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1968)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1969) static void arm_iommu_sync_single_for_device(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1970) dma_addr_t handle, size_t size, enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1971) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1972) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1973) dma_addr_t iova = handle & PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1974) struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1975) unsigned int offset = handle & ~PAGE_MASK;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1976)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1977) if (!iova)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1978) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1979)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1980) __dma_page_cpu_to_dev(page, offset, size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1981) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1982)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1983) static const struct dma_map_ops iommu_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1984) .alloc = arm_iommu_alloc_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1985) .free = arm_iommu_free_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1986) .mmap = arm_iommu_mmap_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1987) .get_sgtable = arm_iommu_get_sgtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1988)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1989) .map_page = arm_iommu_map_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1990) .unmap_page = arm_iommu_unmap_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1991) .sync_single_for_cpu = arm_iommu_sync_single_for_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1992) .sync_single_for_device = arm_iommu_sync_single_for_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1993)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1994) .map_sg = arm_iommu_map_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1995) .unmap_sg = arm_iommu_unmap_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1996) .sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1997) .sync_sg_for_device = arm_iommu_sync_sg_for_device,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1998)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 1999) .map_resource = arm_iommu_map_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2000) .unmap_resource = arm_iommu_unmap_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2001)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2002) .dma_supported = arm_dma_supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2003) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2004)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2005) static const struct dma_map_ops iommu_coherent_ops = {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2006) .alloc = arm_coherent_iommu_alloc_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2007) .free = arm_coherent_iommu_free_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2008) .mmap = arm_coherent_iommu_mmap_attrs,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2009) .get_sgtable = arm_iommu_get_sgtable,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2010)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2011) .map_page = arm_coherent_iommu_map_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2012) .unmap_page = arm_coherent_iommu_unmap_page,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2013)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2014) .map_sg = arm_coherent_iommu_map_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2015) .unmap_sg = arm_coherent_iommu_unmap_sg,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2016)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2017) .map_resource = arm_iommu_map_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2018) .unmap_resource = arm_iommu_unmap_resource,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2019)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2020) .dma_supported = arm_dma_supported,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2021) };
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2022)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2023) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2024) * arm_iommu_create_mapping - create an IO address space mapping for a bus
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2025) * @bus: pointer to the bus holding the client device (for IOMMU calls)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2026) * @base: start address of the valid IO address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2027) * @size: maximum size of the valid IO address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2028) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2029) * Creates a mapping structure which holds information about used/unused
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2030) * IO address ranges, which is required to perform memory allocation and
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2031) * mapping with IOMMU aware functions.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2032) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2033) * The client device needs to be attached to the mapping with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2034) * arm_iommu_attach_device() function.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2035) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2036) struct dma_iommu_mapping *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2037) arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2038) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2039) unsigned int bits = size >> PAGE_SHIFT;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2040) unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2041) struct dma_iommu_mapping *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2042) int extensions = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2043) int err = -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2044)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2045) /* currently only 32-bit DMA address space is supported */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2046) if (size > DMA_BIT_MASK(32) + 1)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2047) return ERR_PTR(-ERANGE);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2048)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2049) if (!bitmap_size)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2050) return ERR_PTR(-EINVAL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2051)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2052) if (bitmap_size > PAGE_SIZE) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2053) extensions = bitmap_size / PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2054) bitmap_size = PAGE_SIZE;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2055) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2056)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2057) mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2058) if (!mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2059) goto err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2060)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2061) mapping->bitmap_size = bitmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2062) mapping->bitmaps = kcalloc(extensions, sizeof(unsigned long *),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2063) GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2064) if (!mapping->bitmaps)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2065) goto err2;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2066)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2067) mapping->bitmaps[0] = kzalloc(bitmap_size, GFP_KERNEL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2068) if (!mapping->bitmaps[0])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2069) goto err3;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2070)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2071) mapping->nr_bitmaps = 1;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2072) mapping->extensions = extensions;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2073) mapping->base = base;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2074) mapping->bits = BITS_PER_BYTE * bitmap_size;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2075)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2076) spin_lock_init(&mapping->lock);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2077)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2078) mapping->domain = iommu_domain_alloc(bus);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2079) if (!mapping->domain)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2080) goto err4;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2081)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2082) kref_init(&mapping->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2083) return mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2084) err4:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2085) kfree(mapping->bitmaps[0]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2086) err3:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2087) kfree(mapping->bitmaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2088) err2:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2089) kfree(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2090) err:
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2091) return ERR_PTR(err);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2092) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2093) EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);
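
/*
 * Illustrative creation sketch; the bus, base and size below are arbitrary
 * placeholder values, not requirements:
 *
 *	struct dma_iommu_mapping *mapping;
 *
 *	mapping = arm_iommu_create_mapping(&platform_bus_type,
 *					   0x10000000, SZ_256M);
 *	if (IS_ERR(mapping))
 *		return PTR_ERR(mapping);
 */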
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2094)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2095) static void release_iommu_mapping(struct kref *kref)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2096) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2097) int i;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2098) struct dma_iommu_mapping *mapping =
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2099) container_of(kref, struct dma_iommu_mapping, kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2100)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2101) iommu_domain_free(mapping->domain);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2102) for (i = 0; i < mapping->nr_bitmaps; i++)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2103) kfree(mapping->bitmaps[i]);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2104) kfree(mapping->bitmaps);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2105) kfree(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2106) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2107)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2108) static int extend_iommu_mapping(struct dma_iommu_mapping *mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2109) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2110) int next_bitmap;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2111)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2112) if (mapping->nr_bitmaps >= mapping->extensions)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2113) return -EINVAL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2114)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2115) next_bitmap = mapping->nr_bitmaps;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2116) mapping->bitmaps[next_bitmap] = kzalloc(mapping->bitmap_size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2117) GFP_ATOMIC);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2118) if (!mapping->bitmaps[next_bitmap])
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2119) return -ENOMEM;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2120)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2121) mapping->nr_bitmaps++;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2122)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2123) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2124) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2125)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2126) void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2127) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2128) if (mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2129) kref_put(&mapping->kref, release_iommu_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2130) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2131) EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2132)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2133) static int __arm_iommu_attach_device(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2134) struct dma_iommu_mapping *mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2135) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2136) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2137)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2138) err = iommu_attach_device(mapping->domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2139) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2140) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2141)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2142) kref_get(&mapping->kref);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2143) to_dma_iommu_mapping(dev) = mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2144)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2145) pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2146) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2147) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2148)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2149) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2150) * arm_iommu_attach_device - attach a device to an IO address space mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2151) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2152) * @mapping: io address space mapping structure (returned from
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2153) * arm_iommu_create_mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2154) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2155) * Attaches the specified io address space mapping to the provided device.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2156) * This replaces the dma operations (dma_map_ops pointer) with the
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2157) * IOMMU aware version.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2158) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2159) * More than one client might be attached to the same io address space
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2160) * mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2161) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2162) int arm_iommu_attach_device(struct device *dev,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2163) struct dma_iommu_mapping *mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2164) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2165) int err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2166)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2167) err = __arm_iommu_attach_device(dev, mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2168) if (err)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2169) return err;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2170)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2171) set_dma_ops(dev, &iommu_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2172) return 0;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2173) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2174) EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
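
/*
 * Illustrative attach sketch for a mapping obtained from
 * arm_iommu_create_mapping(); the error handling shown is the usual
 * pattern rather than a requirement:
 *
 *	err = arm_iommu_attach_device(dev, mapping);
 *	if (err) {
 *		arm_iommu_release_mapping(mapping);
 *		return err;
 *	}
 */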
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2175)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2176) /**
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2177) * arm_iommu_detach_device - detach a device from its IOMMU mapping
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2178) * @dev: valid struct device pointer
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2179) *
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2180) * Detaches the provided device from a previously attached mapping.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2181) * This overwrites the dma_ops pointer with the appropriate non-IOMMU ops.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2182) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2183) void arm_iommu_detach_device(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2184) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2185) struct dma_iommu_mapping *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2186)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2187) mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2188) if (!mapping) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2189) dev_warn(dev, "Not attached\n");
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2190) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2191) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2192)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2193) iommu_detach_device(mapping->domain, dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2194) kref_put(&mapping->kref, release_iommu_mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2195) to_dma_iommu_mapping(dev) = NULL;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2196) set_dma_ops(dev, arm_get_dma_map_ops(dev->archdata.dma_coherent));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2197)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2198) pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2199) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2200) EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
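
/*
 * Illustrative teardown sketch, mirroring arm_teardown_iommu_dma_ops()
 * below: detach the device first, then drop the creator's reference on
 * the mapping.
 *
 *	arm_iommu_detach_device(dev);
 *	arm_iommu_release_mapping(mapping);
 */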
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2201)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2202) static const struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2203) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2204) return coherent ? &iommu_coherent_ops : &iommu_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2205) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2206)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2207) static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2208) const struct iommu_ops *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2209) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2210) struct dma_iommu_mapping *mapping;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2211)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2212) if (!iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2213) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2214)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2215) mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2216) if (IS_ERR(mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2217) pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2218) size, dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2219) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2220) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2221)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2222) if (__arm_iommu_attach_device(dev, mapping)) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2223) pr_warn("Failed to attach device %s to IOMMU mapping\n",
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2224) dev_name(dev));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2225) arm_iommu_release_mapping(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2226) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2227) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2228)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2229) return true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2230) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2231)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2232) static void arm_teardown_iommu_dma_ops(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2233) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2234) struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2235)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2236) if (!mapping)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2237) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2238)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2239) arm_iommu_detach_device(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2240) arm_iommu_release_mapping(mapping);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2241) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2242)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2243) #else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2244)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2245) static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2246) const struct iommu_ops *iommu)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2247) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2248) return false;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2249) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2250)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2251) static void arm_teardown_iommu_dma_ops(struct device *dev) { }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2252)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2253) #define arm_get_iommu_dma_map_ops arm_get_dma_map_ops
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2254)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2255) #endif /* CONFIG_ARM_DMA_USE_IOMMU */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2256)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2257) void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2258) const struct iommu_ops *iommu, bool coherent)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2259) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2260) const struct dma_map_ops *dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2261)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2262) dev->archdata.dma_coherent = coherent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2263) #ifdef CONFIG_SWIOTLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2264) dev->dma_coherent = coherent;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2265) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2266)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2267) /*
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2268) * Don't override the dma_ops if they have already been set. Ideally
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2269) * this should be the only location where dma_ops are set; remove this
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2270) * check once all other callers of set_dma_ops have disappeared.
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2271) */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2272) if (dev->dma_ops)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2273) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2274)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2275) if (arm_setup_iommu_dma_ops(dev, dma_base, size, iommu))
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2276) dma_ops = arm_get_iommu_dma_map_ops(coherent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2277) else
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2278) dma_ops = arm_get_dma_map_ops(coherent);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2279)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2280) set_dma_ops(dev, dma_ops);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2281)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2282) #ifdef CONFIG_XEN
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2283) if (xen_initial_domain())
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2284) dev->dma_ops = &xen_swiotlb_dma_ops;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2285) #endif
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2286) dev->archdata.dma_ops_setup = true;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2287) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2288)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2289) void arch_teardown_dma_ops(struct device *dev)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2290) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2291) if (!dev->archdata.dma_ops_setup)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2292) return;
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2293)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2294) arm_teardown_iommu_dma_ops(dev);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2295) /* Let arch_setup_dma_ops() start again from scratch upon re-probe */
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2296) set_dma_ops(dev, NULL);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2297) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2298)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2299) #ifdef CONFIG_SWIOTLB
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2300) void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2301) enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2302) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2303) __dma_page_cpu_to_dev(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2304) size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2305) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2306)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2307) void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2308) enum dma_data_direction dir)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2309) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2310) __dma_page_dev_to_cpu(phys_to_page(paddr), paddr & (PAGE_SIZE - 1),
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2311) size, dir);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2312) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2313)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2314) void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2315) gfp_t gfp, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2316) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2317) return __dma_alloc(dev, size, dma_handle, gfp,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2318) __get_dma_pgprot(attrs, PAGE_KERNEL), false,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2319) attrs, __builtin_return_address(0));
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2320) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2321)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2322) void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2323) dma_addr_t dma_handle, unsigned long attrs)
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2324) {
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2325) __arm_dma_free(dev, size, cpu_addr, dma_handle, attrs, false);
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2326) }
^8f3ce5b39 (kx 2023-10-28 12:00:06 +0300 2327) #endif /* CONFIG_SWIOTLB */