/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 Christoph Hellwig.
 *
 * DMA operations that map physical memory directly without using an IOMMU.
 */
#ifndef _KERNEL_DMA_DIRECT_H
#define _KERNEL_DMA_DIRECT_H

#include <linux/dma-direct.h>

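/*
 * Out-of-line helpers implemented in direct.c and called by the generic
 * dma-mapping code for devices that use the direct mapping.
 */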
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
size_t dma_direct_max_mapping_size(struct device *dev);

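/*
 * The scatterlist sync and unmap helpers below only do real work when the
 * architecture needs explicit cache maintenance or swiotlb bounce buffering
 * may be in use; otherwise they compile away to empty stubs.
 */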
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

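/*
 * Make a streaming mapping visible to the device again: copy the CPU's data
 * into the swiotlb bounce buffer if the address falls inside the bounce pool,
 * then perform the architecture cache maintenance for non-coherent devices.
 */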
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);

	if (!dev_is_dma_coherent(dev))
		arch_sync_dma_for_device(paddr, size, dir);
}

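/*
 * Make a streaming mapping visible to the CPU: do the architecture cache
 * maintenance for non-coherent devices first, then copy any data the device
 * wrote back out of the swiotlb bounce buffer, and finally give the
 * architecture a chance to mark DMA_FROM_DEVICE pages clean.
 */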
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
	phys_addr_t paddr = dma_to_phys(dev, addr);

	if (!dev_is_dma_coherent(dev)) {
		arch_sync_dma_for_cpu(paddr, size, dir);
		arch_sync_dma_for_cpu_all();
	}

	if (unlikely(is_swiotlb_buffer(paddr)))
		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);

	if (dir == DMA_FROM_DEVICE)
		arch_dma_mark_clean(paddr, size);
}

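/*
 * Map a single page for streaming DMA.  The common case is a plain
 * phys_to_dma() translation plus cache maintenance for non-coherent devices;
 * swiotlb bounce buffering is used when it is forced globally or when the
 * physical address is not reachable within the device's DMA mask and bus
 * limit.
 */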
static inline dma_addr_t dma_direct_map_page(struct device *dev,
		struct page *page, unsigned long offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	if (unlikely(swiotlb_force == SWIOTLB_FORCE))
		return swiotlb_map(dev, phys, size, dir, attrs);

	if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
		if (swiotlb_force != SWIOTLB_NO_FORCE)
			return swiotlb_map(dev, phys, size, dir, attrs);

		dev_WARN_ONCE(dev, 1,
			     "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
			     &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
		return DMA_MAPPING_ERROR;
	}

	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		arch_sync_dma_for_device(phys, size, dir);
	return dma_addr;
}

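/*
 * Tear down a mapping created by dma_direct_map_page(): transfer ownership of
 * the buffer back to the CPU unless the caller asked to skip that, and
 * release the swiotlb bounce buffer if one was used.
 */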
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	phys_addr_t phys = dma_to_phys(dev, addr);

	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);

	if (unlikely(is_swiotlb_buffer(phys)))
		swiotlb_tbl_unmap_single(dev, phys, size, size, dir, attrs);
}
#endif /* _KERNEL_DMA_DIRECT_H */