// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/dma-map-ops.h>
#include <linux/cache.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proc-fns.h>

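/*
 * cache_op - apply a cache maintenance routine to a physical range
 * @paddr: physical start address of the range
 * @size: length of the range in bytes
 * @fn: routine that operates on a virtual [start, end) range
 *
 * Highmem pages have no permanent kernel mapping, so they are mapped
 * with kmap_atomic() and processed one page at a time; lowmem is
 * reached directly through the linear mapping.
 */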
static inline void cache_op(phys_addr_t paddr, size_t size,
		void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
	unsigned int offset = paddr & ~PAGE_MASK;
	size_t left = size;
	unsigned long start;

	do {
		size_t len = left;

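		/*
		 * Highmem has no linear mapping: map one page at a time
		 * and clamp len so it never crosses a page boundary.
		 */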
		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			start = (unsigned long)(addr + offset);
			fn(start, start + len);
			kunmap_atomic(addr);
		} else {
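			/*
			 * Lowmem is virtually contiguous in the linear
			 * mapping, so a single call covers the whole range.
			 */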
			start = (unsigned long)phys_to_virt(paddr);
			fn(start, start + size);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

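/*
 * arch_sync_dma_for_device - make CPU writes visible to the device
 *
 * DMA_TO_DEVICE and DMA_BIDIRECTIONAL write back dirty lines so the
 * device reads current data; DMA_FROM_DEVICE needs nothing here, as
 * the CPU copy is invalidated in arch_sync_dma_for_cpu() instead.
 */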
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_FROM_DEVICE:
		break;
	case DMA_TO_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_wb_range);
		break;
	default:
		BUG();
	}
}

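/*
 * arch_sync_dma_for_cpu - discard cache lines made stale by DMA
 *
 * DMA_FROM_DEVICE and DMA_BIDIRECTIONAL invalidate the range so the
 * CPU refetches what the device wrote; DMA_TO_DEVICE needs no
 * maintenance, since the device only read the buffer.
 */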
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, cpu_dma_inval_range);
		break;
	default:
		BUG();
	}
}

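/*
 * arch_dma_prep_coherent - flush a page range before it is remapped
 *
 * Write back and invalidate the range so no dirty or stale lines
 * alias the buffer once it is used as a non-cached coherent mapping.
 */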
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_op(page_to_phys(page), size, cpu_dma_wbinval_range);
}