/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#undef DEBUG

#include <linux/dma-map-ops.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/cacheflush.h>

#if defined(CONFIG_MMU) && !defined(CONFIG_COLDFIRE)
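/*
 * Push any dirty cache lines covering this allocation out to memory (and
 * invalidate them) before the buffer is handed out through an uncached
 * coherent mapping.
 */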
void arch_dma_prep_coherent(struct page *page, size_t size)
{
	cache_push(page_to_phys(page), size);
}
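/*
 * Make coherent DMA mappings non-cacheable: the '040/'060 use the
 * serialized non-cacheable mode, earlier CPUs only have the single
 * cache-inhibit bit.
 */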
pgprot_t pgprot_dmacoherent(pgprot_t prot)
{
	if (CPU_IS_040_OR_060) {
		pgprot_val(prot) &= ~_PAGE_CACHE040;
		pgprot_val(prot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
	} else {
		pgprot_val(prot) |= _PAGE_NOCACHE030;
	}
	return prot;
}
#else
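/*
 * With no MMU (or on ColdFire) there is no separate uncached mapping to
 * set up, so coherent buffers are simply taken straight from the page
 * allocator.
 */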
void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	void *ret;

	/*
	 * Devices that cannot reach a full 32 bits of address space (or
	 * anonymous allocations with no device at all) must be served
	 * from the DMA zone.
	 */
	if (dev == NULL || (*dev->dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}
	return ret;
}
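/* Nothing was remapped on the allocation path, so just return the pages. */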
void arch_dma_free(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	free_pages((unsigned long)vaddr, get_order(size));
}

#endif /* CONFIG_MMU && !CONFIG_COLDFIRE */
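/*
 * Streaming DMA cache maintenance, invoked by the dma-direct core when
 * ownership of a buffer passes to the device.  For illustration only, a
 * hypothetical driver would reach this through the usual mapping calls:
 *
 *	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, addr))
 *		return -ENOMEM;
 *	(start the transfer, then dma_unmap_single() once it completes)
 */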
void arch_sync_dma_for_device(phys_addr_t handle, size_t size,
		enum dma_data_direction dir)
{
	switch (dir) {
	case DMA_BIDIRECTIONAL:
	case DMA_TO_DEVICE:
		/* Push dirty lines to memory (and invalidate them). */
		cache_push(handle, size);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate, so the CPU re-reads what the device wrote. */
		cache_clear(handle, size);
		break;
	default:
		pr_err_ratelimited("dma_sync_single_for_device: unsupported dir %u\n",
				   dir);
		break;
	}
}