// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on linux/arch/arm/mm/dma-mapping.c
 *
 * Copyright (C) 2000-2004 Russell King
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/scatterlist.h>

#include <asm/cachetype.h>
#include <asm/cacheflush.h>
#include <asm/outercache.h>
#include <asm/cp15.h>

#include "dma.h"

/*
 * The generic direct mapping code is used if
 *  - MMU/MPU is off
 *  - cpu is v7m w/o cache support
 *  - device is coherent
 * otherwise arm_nommu_dma_ops is used.
 *
 * arm_nommu_dma_ops relies on consistent DMA memory (please refer to
 * [1] for how to declare such memory).
 *
 * [1] Documentation/devicetree/bindings/reserved-memory/reserved-memory.txt
 */
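
/*
 * For reference, a global coherent pool as required by arm_nommu_dma_ops
 * could be declared with a devicetree node like the following (a minimal
 * sketch; the label, node name, address and size are illustrative, not
 * taken from any real platform):
 *
 *	reserved-memory {
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *		ranges;
 *
 *		coherent_dma: dma-pool@38000000 {
 *			compatible = "shared-dma-pool";
 *			reg = <0x38000000 0x100000>;
 *			no-map;
 *			linux,dma-default;
 *		};
 *	};
 *
 * The linux,dma-default property marks the pool as the one backing
 * dma_alloc_from_global_coherent() below.
 */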

static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
                                 dma_addr_t *dma_handle, gfp_t gfp,
                                 unsigned long attrs)
{
        void *ret = dma_alloc_from_global_coherent(dev, size, dma_handle);

        /*
         * dma_alloc_from_global_coherent() may fail because:
         *
         * - no consistent DMA region has been defined, so we can't
         *   continue.
         * - there is no space left in the consistent DMA region, so we
         *   can only fall back to the generic allocator if the caller
         *   has advertised that consistency is not required.
         */

        WARN_ON_ONCE(ret == NULL);
        return ret;
}

static void arm_nommu_dma_free(struct device *dev, size_t size,
                               void *cpu_addr, dma_addr_t dma_addr,
                               unsigned long attrs)
{
        int ret = dma_release_from_global_coherent(get_order(size), cpu_addr);

        WARN_ON_ONCE(ret == 0);
}
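
/*
 * From a driver's point of view, the two hooks above sit behind the usual
 * coherent DMA API. A minimal sketch (the buffer size and error handling
 * are illustrative only):
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, buf, dma);
 */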

static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              unsigned long attrs)
{
        int ret;

        if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &ret))
                return ret;
        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;
        return -ENXIO;
}
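
/*
 * Userspace mappings of coherent buffers reach this hook through a
 * driver's mmap() method; a hypothetical file_operations handler might
 * forward to it like so (sketch only; struct foo_dev and its fields are
 * made up for illustration):
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return dma_mmap_coherent(fd->dev, vma, fd->cpu_addr,
 *					 fd->dma_handle, fd->size);
 *	}
 */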

static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
                                  enum dma_data_direction dir)
{
        dmac_map_area(__va(paddr), size, dir);

        if (dir == DMA_FROM_DEVICE)
                outer_inv_range(paddr, paddr + size);
        else
                outer_clean_range(paddr, paddr + size);
}

static void __dma_page_dev_to_cpu(phys_addr_t paddr, size_t size,
                                  enum dma_data_direction dir)
{
        if (dir != DMA_TO_DEVICE) {
                outer_inv_range(paddr, paddr + size);
                dmac_unmap_area(__va(paddr), size, dir);
        }
}
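
/*
 * Together these helpers implement the streaming-DMA ownership hand-off:
 * __dma_page_cpu_to_dev() cleans (or, for DMA_FROM_DEVICE, invalidates)
 * the caches before the device touches memory, and __dma_page_dev_to_cpu()
 * invalidates them before the CPU reads the data back. A hypothetical
 * receive path would exercise them through the generic API like this
 * (buf and len are illustrative):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... the device DMAs into the buffer ...
 *	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now safely read buf ...
 */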

static dma_addr_t arm_nommu_dma_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction dir,
                                         unsigned long attrs)
{
        dma_addr_t handle = page_to_phys(page) + offset;

        __dma_page_cpu_to_dev(handle, size, dir);

        return handle;
}

static void arm_nommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
                                     size_t size, enum dma_data_direction dir,
                                     unsigned long attrs)
{
        __dma_page_dev_to_cpu(handle, size, dir);
}

static int arm_nommu_dma_map_sg(struct device *dev, struct scatterlist *sgl,
                                int nents, enum dma_data_direction dir,
                                unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sgl, sg, nents, i) {
                sg_dma_address(sg) = sg_phys(sg);
                sg_dma_len(sg) = sg->length;
                __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
        }

        return nents;
}

static void arm_nommu_dma_unmap_sg(struct device *dev, struct scatterlist *sgl,
                                   int nents, enum dma_data_direction dir,
                                   unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}
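
/*
 * The sg variants simply apply the same per-buffer maintenance to every
 * list entry. A sketch of typical driver usage (names and lengths are
 * made up for illustration):
 *
 *	struct scatterlist sg[2];
 *	int count;
 *
 *	sg_init_table(sg, ARRAY_SIZE(sg));
 *	sg_set_buf(&sg[0], hdr, hdr_len);
 *	sg_set_buf(&sg[1], payload, payload_len);
 *
 *	count = dma_map_sg(dev, sg, ARRAY_SIZE(sg), DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sg, ARRAY_SIZE(sg), DMA_TO_DEVICE);
 */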

static void arm_nommu_dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        __dma_page_cpu_to_dev(handle, size, dir);
}

static void arm_nommu_dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        __dma_page_dev_to_cpu(handle, size, dir);
}

static void arm_nommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
                                             int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_page_cpu_to_dev(sg_dma_address(sg), sg_dma_len(sg), dir);
}

static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl,
                                          int nents, enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        for_each_sg(sgl, sg, nents, i)
                __dma_page_dev_to_cpu(sg_dma_address(sg), sg_dma_len(sg), dir);
}
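
/*
 * The sync hooks let a driver bounce ownership of an existing mapping back
 * and forth instead of unmapping and remapping it, e.g. (illustrative):
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU inspects the received data ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device may DMA into the buffer again ...
 */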

const struct dma_map_ops arm_nommu_dma_ops = {
        .alloc                  = arm_nommu_dma_alloc,
        .free                   = arm_nommu_dma_free,
        .alloc_pages            = dma_direct_alloc_pages,
        .free_pages             = dma_direct_free_pages,
        .mmap                   = arm_nommu_dma_mmap,
        .map_page               = arm_nommu_dma_map_page,
        .unmap_page             = arm_nommu_dma_unmap_page,
        .map_sg                 = arm_nommu_dma_map_sg,
        .unmap_sg               = arm_nommu_dma_unmap_sg,
        .sync_single_for_device = arm_nommu_dma_sync_single_for_device,
        .sync_single_for_cpu    = arm_nommu_dma_sync_single_for_cpu,
        .sync_sg_for_device     = arm_nommu_dma_sync_sg_for_device,
        .sync_sg_for_cpu        = arm_nommu_dma_sync_sg_for_cpu,
};
EXPORT_SYMBOL(arm_nommu_dma_ops);

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        if (IS_ENABLED(CONFIG_CPU_V7M)) {
                /*
                 * Cache support for v7m is optional, so the device can be
                 * treated as coherent if no cache has been detected. Note
                 * that it is not enough to check whether the MPU is in use,
                 * since in the absence of an MPU the default system memory
                 * map is used.
                 */
                dev->archdata.dma_coherent = (cacheid) ? coherent : true;
        } else {
                /*
                 * Assume coherent DMA in case MMU/MPU has not been set up.
                 */
                dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
        }

        if (!dev->archdata.dma_coherent)
                set_dma_ops(dev, &arm_nommu_dma_ops);
}