/*
 * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 *
 * Based on DMA code from MIPS.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/cache.h>
#include <asm/cacheflush.h>

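/*
 * Called by the generic DMA-mapping core before the device accesses the
 * buffer at @paddr. Nios2 has no hardware cache coherency, so the data
 * cache must be maintained by hand: invalidate before the device writes
 * (DMA_FROM_DEVICE), write back before the device reads (DMA_TO_DEVICE).
 */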
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		/*
		 * We only need to write back the caches here, but the Nios2
		 * flush instruction does both writeback and invalidate.
		 */
	case DMA_BIDIRECTIONAL: /* flush and invalidate */
		flush_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	default:
		BUG();
	}
}

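/*
 * Called by the generic DMA-mapping core after the device has finished
 * with the buffer, before the CPU touches it again. Cache lines covering
 * a buffer the device wrote to may be stale and must be invalidated; for
 * DMA_TO_DEVICE the cached copy is still valid, so nothing is done.
 */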
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	void *vaddr = phys_to_virt(paddr);

	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_FROM_DEVICE:
		invalidate_dcache_range((unsigned long)vaddr,
			(unsigned long)(vaddr + size));
		break;
	case DMA_TO_DEVICE:
		break;
	default:
		BUG();
	}
}

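/*
 * Prepare a page for use as a coherent DMA buffer: write back and
 * invalidate its cache lines so that no dirty line can later be evicted
 * on top of data the device has placed in memory.
 */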
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	unsigned long start = (unsigned long)page_address(page);

	flush_dcache_range(start, start + size);
}

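/*
 * Return an uncached alias of @ptr (@size is unused). On Nios2 the
 * region at CONFIG_NIOS2_IO_REGION_BASE bypasses the data cache while
 * aliasing the same physical memory as the cached kernel region, so
 * OR-ing in the region base yields an uncached view of the buffer.
 */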
void *arch_dma_set_uncached(void *ptr, size_t size)
{
	unsigned long addr = (unsigned long)ptr;

	addr |= CONFIG_NIOS2_IO_REGION_BASE;

	return (void *)addr;
}