// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/dma-map-ops.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>

/*
 * ARCH specific callbacks for generic noncoherent DMA ops
 *  - hardware IOC not available (or "dma-coherent" not set for device in DT)
 *  - but still handle both coherent and non-coherent requests from the caller
 *
 * For DMA coherent hardware (IOC) the generic code suffices
 */

void arch_dma_prep_coherent(struct page *page, size_t size)
{
	/*
	 * Evict any existing L1 and/or L2 lines for the backing page
	 * in case it was used earlier as a normal "cached" page.
	 * Yes, this has bitten us before - STAR 9000898266.
	 *
	 * Although the core does call flush_cache_vmap(), it gets kvaddr,
	 * hence can't be used to efficiently flush the L1 and/or L2, which
	 * need paddr. Currently flush_cache_vmap() nukes the L1 cache
	 * completely; that will be optimized in a separate commit.
	 */
	dma_cache_wback_inv(page_to_phys(page), size);
}
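
/*
 * Illustrative only (not part of this file): for a non-coherent device the
 * generic dma-direct allocator ends up calling arch_dma_prep_coherent() on
 * the freshly allocated backing pages, e.g. from a driver doing
 *
 *	void *cpu_addr;
 *	dma_addr_t dma_handle;
 *
 *	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
 *
 * so no stale cache lines cover the buffer before it is handed to the device.
 */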

/*
 * Cache operations depending on function and direction argument, inspired by
 * https://lkml.org/lkml/2018/5/18/979
 * "dma_sync_*_for_cpu and direction=TO_DEVICE (was Re: [PATCH 02/20]
 * dma-mapping: provide a generic dma-noncoherent implementation)"
 *
 *          |   map          ==  for_device     |   unmap     ==  for_cpu
 *          |----------------------------------------------------------------
 * TO_DEV   |   writeback        writeback      |   none          none
 * FROM_DEV |   invalidate       invalidate     |   invalidate*   invalidate*
 * BIDIR    |   writeback+inv    writeback+inv  |   invalidate    invalidate
 *
 * [*] needed for CPU speculative prefetches
 *
 * NOTE: we don't check the validity of direction argument as it is done in
 * upper layer functions (in include/linux/dma-mapping.h)
 */

void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		dma_cache_wback(paddr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(paddr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(paddr, size);
		break;

	default:
		break;
	}
}

void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;

	/* FROM_DEVICE invalidate only needed to guard against speculative prefetch */
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		dma_cache_inv(paddr, size);
		break;

	default:
		break;
	}
}
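
/*
 * Illustrative only (not part of this file): a typical driver-side streaming
 * DMA sequence that funnels into the two hooks above when the device is not
 * DMA coherent; "buf" and "len" are placeholders.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *						// -> arch_sync_dma_for_device()
 *	... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *						// -> arch_sync_dma_for_cpu()
 *	... CPU reads the buffer ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *						// -> arch_sync_dma_for_cpu()
 */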

/*
 * Plug in direct dma map ops.
 */
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	/*
	 * IOC hardware snoops all DMA traffic keeping the caches consistent
	 * with memory - eliding need for any explicit cache maintenance of
	 * DMA buffers.
	 */
	if (is_isa_arcv2() && ioc_enable && coherent)
		dev->dma_coherent = true;

	dev_info(dev, "use %scoherent DMA ops\n",
		 dev->dma_coherent ? "" : "non");
}
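
/*
 * Illustrative only (not part of this file): on ARCv2 with the IOC port
 * enabled, a peripheral is treated as coherent when its device tree node
 * carries the standard "dma-coherent" property, e.g. (node name and
 * compatible string below are placeholders):
 *
 *	ethernet@0 {
 *		compatible = "snps,example-dmac";
 *		dma-coherent;
 *	};
 *
 * of_dma_configure() then invokes arch_setup_dma_ops() with coherent == true.
 */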