// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA coherent memory allocation.
 *
 * Copyright (C) 2002 - 2005 Tensilica Inc.
 * Copyright (C) 2015 Cadence Design Systems Inc.
 *
 * Based on version for i386.
 *
 * Chris Zankel <chris@zankel.net>
 * Joe Taylor <joe@tensilica.com, joetylr@yahoo.com>
 */

#include <linux/dma-map-ops.h>
#include <linux/dma-direct.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/platform.h>

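/*
 * Apply a cache maintenance callback to the physical range
 * [paddr, paddr + size). Lowmem is handled in a single call through the
 * kernel linear mapping; highmem pages have no permanent mapping, so
 * they are temporarily mapped one page at a time with kmap_atomic().
 */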
static void do_cache_op(phys_addr_t paddr, size_t size,
			void (*fn)(unsigned long, unsigned long))
{
	unsigned long off = paddr & (PAGE_SIZE - 1);
	unsigned long pfn = PFN_DOWN(paddr);
	struct page *page = pfn_to_page(pfn);

	if (!PageHighMem(page))
		fn((unsigned long)phys_to_virt(paddr), size);
	else
		while (size > 0) {
			size_t sz = min_t(size_t, size, PAGE_SIZE - off);
			void *vaddr = kmap_atomic(page);

			fn((unsigned long)vaddr + off, sz);
			kunmap_atomic(vaddr);
			off = 0;
			++page;
			size -= sz;
		}
}

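/*
 * Make device writes visible to the CPU: for DMA_FROM_DEVICE and
 * DMA_BIDIRECTIONAL transfers the data cache covering the buffer is
 * invalidated so that subsequent CPU reads fetch the DMA'd data from
 * memory. DMA_NONE is an API misuse and triggers BUG(); nothing needs
 * to be done for DMA_TO_DEVICE on this path.
 */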
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		do_cache_op(paddr, size, __invalidate_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

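/*
 * Make CPU writes visible to the device: for DMA_TO_DEVICE and
 * DMA_BIDIRECTIONAL transfers dirty cache lines are written back to
 * memory, but only when the data cache operates in writeback mode
 * (XCHAL_DCACHE_IS_WRITEBACK); a writethrough cache never holds dirty
 * lines, so no flush is needed there.
 */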
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		if (XCHAL_DCACHE_IS_WRITEBACK)
			do_cache_op(paddr, size, __flush_dcache_range);
		break;

	case DMA_NONE:
		BUG();
		break;

	default:
		break;
	}
}

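/*
 * Prepare freshly allocated pages for use as a coherent DMA buffer:
 * stale cache lines covering the buffer are invalidated up front, so
 * that a later writeback cannot clobber data written through the
 * uncached alias returned by arch_dma_set_uncached() below.
 */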
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	__invalidate_dcache_range((unsigned long)page_address(page), size);
}

/*
 * Memory caching is platform-dependent in noMMU xtensa configurations.
 * This function should be implemented in platform code in order to
 * enable coherent DMA memory operations when CONFIG_MMU is not enabled.
 */
#ifdef CONFIG_MMU
void *arch_dma_set_uncached(void *p, size_t size)
{
	return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
}
#endif /* CONFIG_MMU */
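/*
 * On MMU configurations the cached and bypass (uncached) KSEG regions
 * map the same physical memory at a fixed virtual offset, so the
 * translation above is plain pointer arithmetic. A hypothetical noMMU
 * platform implementation could follow the same pattern; the sketch
 * below is illustrative only, and PLATFORM_UNCACHED_OFFSET is an
 * assumed platform-specific constant, not an existing symbol:
 *
 *	void *arch_dma_set_uncached(void *p, size_t size)
 *	{
 *		// Shift the pointer into the platform's uncached alias
 *		// of the same physical memory.
 *		return p + PLATFORM_UNCACHED_OFFSET;
 *	}
 */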