// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC Linux
 *
 * Linux architectural port borrowing liberally from similar works of
 * others. All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 *
 * DMA mapping callbacks...
 */

#include <linux/dma-map-ops.h>
#include <linux/pagewalk.h>

#include <asm/cpuinfo.h>
#include <asm/spr_defs.h>
#include <asm/tlbflush.h>

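/*
 * Page-walk callback, invoked by walk_page_range() once per PTE in the
 * requested range: mark the page cache-inhibited so that subsequent
 * accesses through this mapping bypass the data cache.
 */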
static int
page_set_nocache(pte_t *pte, unsigned long addr,
		 unsigned long next, struct mm_walk *walk)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	pte_val(*pte) |= _PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/*
	 * Flush the page out of the dcache, one cache block at a time;
	 * writing a block's physical address to the data cache block
	 * flush register (SPR_DCBFR) writes it back and invalidates it.
	 */
	for (cl = __pa(addr); cl < __pa(next); cl += cpuinfo->dcache_block_size)
		mtspr(SPR_DCBFR, cl);

	return 0;
}

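/*
 * Hook page_set_nocache() into the generic page-table walker; with only
 * .pte_entry set, walk_page_range() visits every PTE in the range.
 */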
static const struct mm_walk_ops set_nocache_walk_ops = {
	.pte_entry = page_set_nocache,
};

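/*
 * Page-walk callback that undoes page_set_nocache(): clear the
 * cache-inhibit bit and shoot down the stale TLB entry again.
 */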
static int
page_clear_nocache(pte_t *pte, unsigned long addr,
		   unsigned long next, struct mm_walk *walk)
{
	pte_val(*pte) &= ~_PAGE_CI;

	/*
	 * Flush the page out of the TLB so that the new page flags get
	 * picked up next time there's an access
	 */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	return 0;
}

static const struct mm_walk_ops clear_nocache_walk_ops = {
	.pte_entry = page_clear_nocache,
};

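/*
 * Called by the generic dma-direct allocator (the architecture selects
 * ARCH_HAS_DMA_SET_UNCACHED) to make a freshly allocated range coherent.
 * There is no separate uncached alias on OpenRISC: the same kernel
 * virtual address is returned once its pages are flagged cache-inhibited.
 */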
void *arch_dma_set_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;
	int error;

	/*
	 * We need to iterate through the pages, clearing the dcache for
	 * them and setting the cache-inhibit bit.
	 */
	mmap_read_lock(&init_mm);
	error = walk_page_range(&init_mm, va, va + size, &set_nocache_walk_ops,
			NULL);
	mmap_read_unlock(&init_mm);

	if (error)
		return ERR_PTR(error);
	return cpu_addr;
}

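/*
 * Undo arch_dma_set_uncached() when the coherent buffer is freed: walk
 * the same range and make the pages cacheable again.
 */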
void arch_dma_clear_uncached(void *cpu_addr, size_t size)
{
	unsigned long va = (unsigned long)cpu_addr;

	mmap_read_lock(&init_mm);
	/* walk_page_range shouldn't be able to fail here */
	WARN_ON(walk_page_range(&init_mm, va, va + size,
			&clear_nocache_walk_ops, NULL));
	mmap_read_unlock(&init_mm);
}

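/*
 * Cache maintenance for streaming DMA mappings: before a buffer is handed
 * to a device, CPU-side dirty data must be written back (DMA_TO_DEVICE),
 * or stale lines discarded so the device's writes become visible
 * (DMA_FROM_DEVICE). The cache-control SPRs are driven with physical
 * addresses here, one dcache block per write.
 */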
void arch_sync_dma_for_device(phys_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	unsigned long cl;
	struct cpuinfo_or1k *cpuinfo = &cpuinfo_or1k[smp_processor_id()];

	switch (dir) {
	case DMA_TO_DEVICE:
		/* Flush the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBFR, cl);
		break;
	case DMA_FROM_DEVICE:
		/* Invalidate the dcache for the requested range */
		for (cl = addr; cl < addr + size;
		     cl += cpuinfo->dcache_block_size)
			mtspr(SPR_DCBIR, cl);
		break;
	default:
		/*
		 * NOTE: If dir == DMA_BIDIRECTIONAL then there's no need to
		 * flush nor invalidate the cache here as the area will need
		 * to be manually synced anyway.
		 */
		break;
	}
}