// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

#include <linux/cache.h>
#include <linux/dma-map-ops.h>
#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/types.h>
#include <linux/version.h>
#include <asm/cache.h>

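/*
 * Apply a cache maintenance callback to the kernel mapping of the
 * physical range [paddr, paddr + size), one page at a time.  Highmem
 * pages are temporarily mapped with kmap_atomic() so the callback can
 * operate on virtual addresses.
 */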
static inline void cache_op(phys_addr_t paddr, size_t size,
			    void (*fn)(unsigned long start, unsigned long end))
{
	struct page *page = phys_to_page(paddr);
	void *start = __va(page_to_phys(page));
	unsigned long offset = offset_in_page(paddr);
	size_t left = size;

	do {
		size_t len = left;

		/* Clamp the chunk to the end of the current page. */
		if (offset + len > PAGE_SIZE)
			len = PAGE_SIZE - offset;

		if (PageHighMem(page)) {
			/* Highmem pages need a temporary kernel mapping. */
			start = kmap_atomic(page);

			fn((unsigned long)start + offset,
			   (unsigned long)start + offset + len);

			kunmap_atomic(start);
		} else {
			fn((unsigned long)start + offset,
			   (unsigned long)start + offset + len);
		}
		/* Only the first page can start mid-page. */
		offset = 0;

		page++;
		start += PAGE_SIZE;
		left -= len;
	} while (left);
}

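/*
 * Zero a range through its kernel mapping, then write back and
 * invalidate the covering cache lines so the cleared data reaches
 * memory.
 */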
static void dma_wbinv_set_zero_range(unsigned long start, unsigned long end)
{
	memset((void *)start, 0, end - start);
	dma_wbinv_range(start, end);
}

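/*
 * Called by the generic DMA code to prepare pages for a coherent
 * allocation: zero them and flush the zeroes out of the cache before
 * the pages are handed out as coherent DMA memory.
 */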
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_op(page_to_phys(page), size, dma_wbinv_set_zero_range);
}

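/*
 * Make CPU-side writes visible to the device before DMA starts.  A
 * write back suffices for DMA_TO_DEVICE; when the device may also
 * write the buffer, the lines are written back and invalidated so no
 * dirty line can later be evicted over the device's data.
 */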
void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
			      enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		cache_op(paddr, size, dma_wb_range);
		break;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_wbinv_range);
		break;
	default:
		BUG();
	}
}

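/*
 * Make device-side writes visible to the CPU after DMA completes.
 * Nothing to do for DMA_TO_DEVICE; otherwise invalidate the range so
 * subsequent CPU reads fetch the data the device wrote to memory.
 */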
void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
			   enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_TO_DEVICE:
		return;
	case DMA_FROM_DEVICE:
	case DMA_BIDIRECTIONAL:
		cache_op(paddr, size, dma_inv_range);
		break;
	default:
		BUG();
	}
}